keystone-9.0.0/0000775000567000056710000000000012701407246014525 5ustar jenkinsjenkins00000000000000keystone-9.0.0/etc/0000775000567000056710000000000012701407246015300 5ustar jenkinsjenkins00000000000000keystone-9.0.0/etc/sso_callback_template.html0000664000567000056710000000123112701407102022465 0ustar jenkinsjenkins00000000000000 Keystone WebSSO redirect
Please wait...
keystone-9.0.0/etc/policy.v3cloudsample.json0000664000567000056710000003311712701407102022246 0ustar jenkinsjenkins00000000000000{ "admin_required": "role:admin", "cloud_admin": "role:admin and (token.is_admin_project:True or domain_id:admin_domain_id)", "service_role": "role:service", "service_or_admin": "rule:admin_required or rule:service_role", "owner" : "user_id:%(user_id)s or user_id:%(target.token.user_id)s", "admin_or_owner": "(rule:admin_required and domain_id:%(target.token.user.domain.id)s) or rule:owner", "admin_and_matching_domain_id": "rule:admin_required and domain_id:%(domain_id)s", "service_admin_or_owner": "rule:service_or_admin or rule:owner", "default": "rule:admin_required", "identity:get_region": "", "identity:list_regions": "", "identity:create_region": "rule:cloud_admin", "identity:update_region": "rule:cloud_admin", "identity:delete_region": "rule:cloud_admin", "identity:get_service": "rule:admin_required", "identity:list_services": "rule:admin_required", "identity:create_service": "rule:cloud_admin", "identity:update_service": "rule:cloud_admin", "identity:delete_service": "rule:cloud_admin", "identity:get_endpoint": "rule:admin_required", "identity:list_endpoints": "rule:admin_required", "identity:create_endpoint": "rule:cloud_admin", "identity:update_endpoint": "rule:cloud_admin", "identity:delete_endpoint": "rule:cloud_admin", "identity:get_domain": "rule:cloud_admin or rule:admin_and_matching_domain_id", "identity:list_domains": "rule:cloud_admin", "identity:create_domain": "rule:cloud_admin", "identity:update_domain": "rule:cloud_admin", "identity:delete_domain": "rule:cloud_admin", "admin_and_matching_target_project_domain_id": "rule:admin_required and domain_id:%(target.project.domain_id)s", "admin_and_matching_project_domain_id": "rule:admin_required and domain_id:%(project.domain_id)s", "identity:get_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id or project_id:%(target.project.id)s", 
"identity:list_projects": "rule:cloud_admin or rule:admin_and_matching_domain_id", "identity:list_user_projects": "rule:owner or rule:admin_and_matching_domain_id", "identity:create_project": "rule:cloud_admin or rule:admin_and_matching_project_domain_id", "identity:update_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", "identity:delete_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", "admin_and_matching_target_user_domain_id": "rule:admin_required and domain_id:%(target.user.domain_id)s", "admin_and_matching_user_domain_id": "rule:admin_required and domain_id:%(user.domain_id)s", "identity:get_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", "identity:list_users": "rule:cloud_admin or rule:admin_and_matching_domain_id", "identity:create_user": "rule:cloud_admin or rule:admin_and_matching_user_domain_id", "identity:update_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", "identity:delete_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", "admin_and_matching_target_group_domain_id": "rule:admin_required and domain_id:%(target.group.domain_id)s", "admin_and_matching_group_domain_id": "rule:admin_required and domain_id:%(group.domain_id)s", "identity:get_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", "identity:list_groups": "rule:cloud_admin or rule:admin_and_matching_domain_id", "identity:list_groups_for_user": "rule:owner or rule:admin_and_matching_domain_id", "identity:create_group": "rule:cloud_admin or rule:admin_and_matching_group_domain_id", "identity:update_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", "identity:delete_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", "identity:list_users_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", "identity:remove_user_from_group": "rule:cloud_admin or 
rule:admin_and_matching_target_group_domain_id", "identity:check_user_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", "identity:add_user_to_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", "identity:get_credential": "rule:admin_required", "identity:list_credentials": "rule:admin_required or user_id:%(user_id)s", "identity:create_credential": "rule:admin_required", "identity:update_credential": "rule:admin_required", "identity:delete_credential": "rule:admin_required", "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", "identity:ec2_list_credentials": "rule:admin_required or rule:owner", "identity:ec2_create_credential": "rule:admin_required or rule:owner", "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", "identity:get_role": "rule:admin_required", "identity:list_roles": "rule:admin_required", "identity:create_role": "rule:cloud_admin", "identity:update_role": "rule:cloud_admin", "identity:delete_role": "rule:cloud_admin", "identity:get_domain_role": "rule:cloud_admin or rule:get_domain_roles", "identity:list_domain_roles": "rule:cloud_admin or rule:list_domain_roles", "identity:create_domain_role": "rule:cloud_admin or rule:domain_admin_matches_domain_role", "identity:update_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role", "identity:delete_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role", "domain_admin_matches_domain_role": "rule:admin_required and domain_id:%(role.domain_id)s", "get_domain_roles": "rule:domain_admin_matches_target_domain_role or rule:project_admin_matches_target_domain_role", "domain_admin_matches_target_domain_role": "rule:admin_required and domain_id:%(target.role.domain_id)s", "project_admin_matches_target_domain_role": "rule:admin_required and project_domain_id:%(target.role.domain_id)s", 
"list_domain_roles": "rule:domain_admin_matches_filter_on_list_domain_roles or rule:project_admin_matches_filter_on_list_domain_roles", "domain_admin_matches_filter_on_list_domain_roles": "rule:admin_required and domain_id:%(domain_id)s", "project_admin_matches_filter_on_list_domain_roles": "rule:admin_required and project_domain_id:%(domain_id)s", "identity:get_implied_role": "rule:cloud_admin", "identity:list_implied_roles": "rule:cloud_admin", "identity:create_implied_role": "rule:cloud_admin", "identity:delete_implied_role": "rule:cloud_admin", "identity:list_role_inference_rules": "rule:cloud_admin", "identity:check_implied_role": "rule:cloud_admin", "identity:check_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", "identity:list_grants": "rule:cloud_admin or rule:domain_admin_for_list_grants or rule:project_admin_for_list_grants", "identity:create_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", "identity:revoke_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", "domain_admin_for_grants": "rule:domain_admin_for_global_role_grants or rule:domain_admin_for_domain_role_grants", "domain_admin_for_global_role_grants": "rule:admin_required and None:%(target.role.domain_id)s and rule:domain_admin_grant_match", "domain_admin_for_domain_role_grants": "rule:admin_required and domain_id:%(target.role.domain_id)s and rule:domain_admin_grant_match", "domain_admin_grant_match": "domain_id:%(domain_id)s or domain_id:%(target.project.domain_id)s", "project_admin_for_grants": "rule:project_admin_for_global_role_grants or rule:project_admin_for_domain_role_grants", "project_admin_for_global_role_grants": "rule:admin_required and None:%(target.role.domain_id)s and project_id:%(project_id)s", "project_admin_for_domain_role_grants": "rule:admin_required and project_domain_id:%(target.role.domain_id)s and project_id:%(project_id)s", 
"domain_admin_for_list_grants": "rule:admin_required and rule:domain_admin_grant_match", "project_admin_for_list_grants": "rule:admin_required and project_id:%(project_id)s", "admin_on_domain_filter" : "rule:admin_required and domain_id:%(scope.domain.id)s", "admin_on_project_filter" : "rule:admin_required and project_id:%(scope.project.id)s", "admin_on_domain_of_project_filter" : "rule:admin_required and domain_id:%(target.project.domain_id)s", "identity:list_role_assignments": "rule:cloud_admin or rule:admin_on_domain_filter or rule:admin_on_project_filter", "identity:list_role_assignments_for_tree": "rule:cloud_admin or rule:admin_on_domain_of_project_filter", "identity:get_policy": "rule:cloud_admin", "identity:list_policies": "rule:cloud_admin", "identity:create_policy": "rule:cloud_admin", "identity:update_policy": "rule:cloud_admin", "identity:delete_policy": "rule:cloud_admin", "identity:change_password": "rule:owner", "identity:check_token": "rule:admin_or_owner", "identity:validate_token": "rule:service_admin_or_owner", "identity:validate_token_head": "rule:service_or_admin", "identity:revocation_list": "rule:service_or_admin", "identity:revoke_token": "rule:admin_or_owner", "identity:create_trust": "user_id:%(trust.trustor_user_id)s", "identity:list_trusts": "", "identity:list_roles_for_trust": "", "identity:get_role_for_trust": "", "identity:delete_trust": "", "identity:create_consumer": "rule:admin_required", "identity:get_consumer": "rule:admin_required", "identity:list_consumers": "rule:admin_required", "identity:delete_consumer": "rule:admin_required", "identity:update_consumer": "rule:admin_required", "identity:authorize_request_token": "rule:admin_required", "identity:list_access_token_roles": "rule:admin_required", "identity:get_access_token_role": "rule:admin_required", "identity:list_access_tokens": "rule:admin_required", "identity:get_access_token": "rule:admin_required", "identity:delete_access_token": "rule:admin_required", 
"identity:list_projects_for_endpoint": "rule:admin_required", "identity:add_endpoint_to_project": "rule:admin_required", "identity:check_endpoint_in_project": "rule:admin_required", "identity:list_endpoints_for_project": "rule:admin_required", "identity:remove_endpoint_from_project": "rule:admin_required", "identity:create_endpoint_group": "rule:admin_required", "identity:list_endpoint_groups": "rule:admin_required", "identity:get_endpoint_group": "rule:admin_required", "identity:update_endpoint_group": "rule:admin_required", "identity:delete_endpoint_group": "rule:admin_required", "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", "identity:get_endpoint_group_in_project": "rule:admin_required", "identity:list_endpoint_groups_for_project": "rule:admin_required", "identity:add_endpoint_group_to_project": "rule:admin_required", "identity:remove_endpoint_group_from_project": "rule:admin_required", "identity:create_identity_provider": "rule:cloud_admin", "identity:list_identity_providers": "rule:cloud_admin", "identity:get_identity_providers": "rule:cloud_admin", "identity:update_identity_provider": "rule:cloud_admin", "identity:delete_identity_provider": "rule:cloud_admin", "identity:create_protocol": "rule:cloud_admin", "identity:update_protocol": "rule:cloud_admin", "identity:get_protocol": "rule:cloud_admin", "identity:list_protocols": "rule:cloud_admin", "identity:delete_protocol": "rule:cloud_admin", "identity:create_mapping": "rule:cloud_admin", "identity:get_mapping": "rule:cloud_admin", "identity:list_mappings": "rule:cloud_admin", "identity:delete_mapping": "rule:cloud_admin", "identity:update_mapping": "rule:cloud_admin", "identity:create_service_provider": "rule:cloud_admin", "identity:list_service_providers": "rule:cloud_admin", "identity:get_service_provider": "rule:cloud_admin", "identity:update_service_provider": "rule:cloud_admin", 
"identity:delete_service_provider": "rule:cloud_admin", "identity:get_auth_catalog": "", "identity:get_auth_projects": "", "identity:get_auth_domains": "", "identity:list_projects_for_groups": "", "identity:list_domains_for_groups": "", "identity:list_revoke_events": "", "identity:create_policy_association_for_endpoint": "rule:cloud_admin", "identity:check_policy_association_for_endpoint": "rule:cloud_admin", "identity:delete_policy_association_for_endpoint": "rule:cloud_admin", "identity:create_policy_association_for_service": "rule:cloud_admin", "identity:check_policy_association_for_service": "rule:cloud_admin", "identity:delete_policy_association_for_service": "rule:cloud_admin", "identity:create_policy_association_for_region_and_service": "rule:cloud_admin", "identity:check_policy_association_for_region_and_service": "rule:cloud_admin", "identity:delete_policy_association_for_region_and_service": "rule:cloud_admin", "identity:get_policy_for_endpoint": "rule:cloud_admin", "identity:list_endpoints_for_policy": "rule:cloud_admin", "identity:create_domain_config": "rule:cloud_admin", "identity:get_domain_config": "rule:cloud_admin", "identity:update_domain_config": "rule:cloud_admin", "identity:delete_domain_config": "rule:cloud_admin", "identity:get_domain_config_default": "rule:cloud_admin" } keystone-9.0.0/etc/default_catalog.templates0000664000567000056710000000437712701407102022340 0ustar jenkinsjenkins00000000000000# config for templated.Catalog, using camelCase because I don't want to do # translations for keystone compat catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0 catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0 catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0 catalog.RegionOne.identity.name = Identity Service # fake compute service for now to help novaclient tests work catalog.RegionOne.computev21.publicURL = http://localhost:8774/v2.1/$(tenant_id)s 
catalog.RegionOne.computev21.adminURL = http://localhost:8774/v2.1/$(tenant_id)s catalog.RegionOne.computev21.internalURL = http://localhost:8774/v2.1/$(tenant_id)s catalog.RegionOne.computev21.name = Compute Service V2.1 catalog.RegionOne.volumev2.publicURL = http://localhost:8776/v2/$(tenant_id)s catalog.RegionOne.volumev2.adminURL = http://localhost:8776/v2/$(tenant_id)s catalog.RegionOne.volumev2.internalURL = http://localhost:8776/v2/$(tenant_id)s catalog.RegionOne.volumev2.name = Volume Service V2 catalog.RegionOne.ec2.publicURL = http://localhost:8773/services/Cloud catalog.RegionOne.ec2.adminURL = http://localhost:8773/services/Admin catalog.RegionOne.ec2.internalURL = http://localhost:8773/services/Cloud catalog.RegionOne.ec2.name = EC2 Service catalog.RegionOne.image.publicURL = http://localhost:9292 catalog.RegionOne.image.adminURL = http://localhost:9292 catalog.RegionOne.image.internalURL = http://localhost:9292 catalog.RegionOne.image.name = Image Service catalog.RegionOne.network.publicURL = http://localhost:9696 catalog.RegionOne.network.adminURL = http://localhost:9696 catalog.RegionOne.network.internalURL = http://localhost:9696 catalog.RegionOne.network.name = Network Service catalog.RegionOne.orchestration.publicURL = http://localhost:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.adminURL = http://localhost:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.internalURL = http://localhost:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.name = Orchestration Service catalog.RegionOne.metering.publicURL = http://localhost:8777 catalog.RegionOne.metering.adminURL = http://localhost:8777 catalog.RegionOne.metering.internalURL = http://localhost:8777 catalog.RegionOne.metering.name = Telemetry Service keystone-9.0.0/etc/logging.conf.sample0000664000567000056710000000202612701407102021044 0ustar jenkinsjenkins00000000000000[loggers] keys=root,access [handlers] keys=production,file,access_file,devel [formatters] keys=minimal,normal,debug 
########### # Loggers # ########### [logger_root] level=WARNING handlers=file [logger_access] level=INFO qualname=access handlers=access_file ################ # Log Handlers # ################ [handler_production] class=handlers.SysLogHandler level=ERROR formatter=normal args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) [handler_file] class=handlers.WatchedFileHandler level=WARNING formatter=normal args=('error.log',) [handler_access_file] class=handlers.WatchedFileHandler level=INFO formatter=minimal args=('access.log',) [handler_devel] class=StreamHandler level=NOTSET formatter=debug args=(sys.stdout,) ################## # Log Formatters # ################## [formatter_minimal] format=%(message)s [formatter_normal] format=(%(name)s): %(asctime)s %(levelname)s %(message)s [formatter_debug] format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s keystone-9.0.0/etc/policy.json0000664000567000056710000002274312701407102017471 0ustar jenkinsjenkins00000000000000{ "admin_required": "role:admin or is_admin:1", "service_role": "role:service", "service_or_admin": "rule:admin_required or rule:service_role", "owner" : "user_id:%(user_id)s", "admin_or_owner": "rule:admin_required or rule:owner", "token_subject": "user_id:%(target.token.user_id)s", "admin_or_token_subject": "rule:admin_required or rule:token_subject", "service_admin_or_token_subject": "rule:service_or_admin or rule:token_subject", "default": "rule:admin_required", "identity:get_region": "", "identity:list_regions": "", "identity:create_region": "rule:admin_required", "identity:update_region": "rule:admin_required", "identity:delete_region": "rule:admin_required", "identity:get_service": "rule:admin_required", "identity:list_services": "rule:admin_required", "identity:create_service": "rule:admin_required", "identity:update_service": "rule:admin_required", "identity:delete_service": "rule:admin_required", "identity:get_endpoint": "rule:admin_required", 
"identity:list_endpoints": "rule:admin_required", "identity:create_endpoint": "rule:admin_required", "identity:update_endpoint": "rule:admin_required", "identity:delete_endpoint": "rule:admin_required", "identity:get_domain": "rule:admin_required", "identity:list_domains": "rule:admin_required", "identity:create_domain": "rule:admin_required", "identity:update_domain": "rule:admin_required", "identity:delete_domain": "rule:admin_required", "identity:get_project": "rule:admin_required or project_id:%(target.project.id)s", "identity:list_projects": "rule:admin_required", "identity:list_user_projects": "rule:admin_or_owner", "identity:create_project": "rule:admin_required", "identity:update_project": "rule:admin_required", "identity:delete_project": "rule:admin_required", "identity:get_user": "rule:admin_required", "identity:list_users": "rule:admin_required", "identity:create_user": "rule:admin_required", "identity:update_user": "rule:admin_required", "identity:delete_user": "rule:admin_required", "identity:change_password": "rule:admin_or_owner", "identity:get_group": "rule:admin_required", "identity:list_groups": "rule:admin_required", "identity:list_groups_for_user": "rule:admin_or_owner", "identity:create_group": "rule:admin_required", "identity:update_group": "rule:admin_required", "identity:delete_group": "rule:admin_required", "identity:list_users_in_group": "rule:admin_required", "identity:remove_user_from_group": "rule:admin_required", "identity:check_user_in_group": "rule:admin_required", "identity:add_user_to_group": "rule:admin_required", "identity:get_credential": "rule:admin_required", "identity:list_credentials": "rule:admin_required", "identity:create_credential": "rule:admin_required", "identity:update_credential": "rule:admin_required", "identity:delete_credential": "rule:admin_required", "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", "identity:ec2_list_credentials": 
"rule:admin_or_owner", "identity:ec2_create_credential": "rule:admin_or_owner", "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", "identity:get_role": "rule:admin_required", "identity:list_roles": "rule:admin_required", "identity:create_role": "rule:admin_required", "identity:update_role": "rule:admin_required", "identity:delete_role": "rule:admin_required", "identity:get_domain_role": "rule:admin_required", "identity:list_domain_roles": "rule:admin_required", "identity:create_domain_role": "rule:admin_required", "identity:update_domain_role": "rule:admin_required", "identity:delete_domain_role": "rule:admin_required", "identity:get_implied_role": "rule:admin_required ", "identity:list_implied_roles": "rule:admin_required", "identity:create_implied_role": "rule:admin_required", "identity:delete_implied_role": "rule:admin_required", "identity:list_role_inference_rules": "rule:admin_required", "identity:check_implied_role": "rule:admin_required", "identity:check_grant": "rule:admin_required", "identity:list_grants": "rule:admin_required", "identity:create_grant": "rule:admin_required", "identity:revoke_grant": "rule:admin_required", "identity:list_role_assignments": "rule:admin_required", "identity:list_role_assignments_for_tree": "rule:admin_required", "identity:get_policy": "rule:admin_required", "identity:list_policies": "rule:admin_required", "identity:create_policy": "rule:admin_required", "identity:update_policy": "rule:admin_required", "identity:delete_policy": "rule:admin_required", "identity:check_token": "rule:admin_or_token_subject", "identity:validate_token": "rule:service_admin_or_token_subject", "identity:validate_token_head": "rule:service_or_admin", "identity:revocation_list": "rule:service_or_admin", "identity:revoke_token": "rule:admin_or_token_subject", "identity:create_trust": "user_id:%(trust.trustor_user_id)s", "identity:list_trusts": "", "identity:list_roles_for_trust": "", 
"identity:get_role_for_trust": "", "identity:delete_trust": "", "identity:create_consumer": "rule:admin_required", "identity:get_consumer": "rule:admin_required", "identity:list_consumers": "rule:admin_required", "identity:delete_consumer": "rule:admin_required", "identity:update_consumer": "rule:admin_required", "identity:authorize_request_token": "rule:admin_required", "identity:list_access_token_roles": "rule:admin_required", "identity:get_access_token_role": "rule:admin_required", "identity:list_access_tokens": "rule:admin_required", "identity:get_access_token": "rule:admin_required", "identity:delete_access_token": "rule:admin_required", "identity:list_projects_for_endpoint": "rule:admin_required", "identity:add_endpoint_to_project": "rule:admin_required", "identity:check_endpoint_in_project": "rule:admin_required", "identity:list_endpoints_for_project": "rule:admin_required", "identity:remove_endpoint_from_project": "rule:admin_required", "identity:create_endpoint_group": "rule:admin_required", "identity:list_endpoint_groups": "rule:admin_required", "identity:get_endpoint_group": "rule:admin_required", "identity:update_endpoint_group": "rule:admin_required", "identity:delete_endpoint_group": "rule:admin_required", "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", "identity:get_endpoint_group_in_project": "rule:admin_required", "identity:list_endpoint_groups_for_project": "rule:admin_required", "identity:add_endpoint_group_to_project": "rule:admin_required", "identity:remove_endpoint_group_from_project": "rule:admin_required", "identity:create_identity_provider": "rule:admin_required", "identity:list_identity_providers": "rule:admin_required", "identity:get_identity_providers": "rule:admin_required", "identity:update_identity_provider": "rule:admin_required", "identity:delete_identity_provider": "rule:admin_required", "identity:create_protocol": 
"rule:admin_required", "identity:update_protocol": "rule:admin_required", "identity:get_protocol": "rule:admin_required", "identity:list_protocols": "rule:admin_required", "identity:delete_protocol": "rule:admin_required", "identity:create_mapping": "rule:admin_required", "identity:get_mapping": "rule:admin_required", "identity:list_mappings": "rule:admin_required", "identity:delete_mapping": "rule:admin_required", "identity:update_mapping": "rule:admin_required", "identity:create_service_provider": "rule:admin_required", "identity:list_service_providers": "rule:admin_required", "identity:get_service_provider": "rule:admin_required", "identity:update_service_provider": "rule:admin_required", "identity:delete_service_provider": "rule:admin_required", "identity:get_auth_catalog": "", "identity:get_auth_projects": "", "identity:get_auth_domains": "", "identity:list_projects_for_groups": "", "identity:list_domains_for_groups": "", "identity:list_revoke_events": "", "identity:create_policy_association_for_endpoint": "rule:admin_required", "identity:check_policy_association_for_endpoint": "rule:admin_required", "identity:delete_policy_association_for_endpoint": "rule:admin_required", "identity:create_policy_association_for_service": "rule:admin_required", "identity:check_policy_association_for_service": "rule:admin_required", "identity:delete_policy_association_for_service": "rule:admin_required", "identity:create_policy_association_for_region_and_service": "rule:admin_required", "identity:check_policy_association_for_region_and_service": "rule:admin_required", "identity:delete_policy_association_for_region_and_service": "rule:admin_required", "identity:get_policy_for_endpoint": "rule:admin_required", "identity:list_endpoints_for_policy": "rule:admin_required", "identity:create_domain_config": "rule:admin_required", "identity:get_domain_config": "rule:admin_required", "identity:update_domain_config": "rule:admin_required", "identity:delete_domain_config": 
"rule:admin_required", "identity:get_domain_config_default": "rule:admin_required" } keystone-9.0.0/etc/keystone-paste.ini0000664000567000056710000000454012701407102020746 0ustar jenkinsjenkins00000000000000# Keystone PasteDeploy configuration file. [filter:debug] use = egg:oslo.middleware#debug [filter:request_id] use = egg:oslo.middleware#request_id [filter:build_auth_context] use = egg:keystone#build_auth_context [filter:token_auth] use = egg:keystone#token_auth [filter:admin_token_auth] # This is deprecated in the M release and will be removed in the O release. # Use `keystone-manage bootstrap` and remove this from the pipelines below. use = egg:keystone#admin_token_auth [filter:json_body] use = egg:keystone#json_body [filter:cors] use = egg:oslo.middleware#cors oslo_config_project = keystone [filter:ec2_extension] use = egg:keystone#ec2_extension [filter:ec2_extension_v3] use = egg:keystone#ec2_extension_v3 [filter:s3_extension] use = egg:keystone#s3_extension [filter:url_normalize] use = egg:keystone#url_normalize [filter:sizelimit] use = egg:oslo.middleware#sizelimit [app:public_service] use = egg:keystone#public_service [app:service_v3] use = egg:keystone#service_v3 [app:admin_service] use = egg:keystone#admin_service [pipeline:public_api] # The last item in this pipeline must be public_service or an equivalent # application. It cannot be a filter. pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension public_service [pipeline:admin_api] # The last item in this pipeline must be admin_service or an equivalent # application. It cannot be a filter. pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension s3_extension admin_service [pipeline:api_v3] # The last item in this pipeline must be service_v3 or an equivalent # application. It cannot be a filter. 
pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3 [app:public_version_service] use = egg:keystone#public_version_service [app:admin_version_service] use = egg:keystone#admin_version_service [pipeline:public_version_api] pipeline = cors sizelimit url_normalize public_version_service [pipeline:admin_version_api] pipeline = cors sizelimit url_normalize admin_version_service [composite:main] use = egg:Paste#urlmap /v2.0 = public_api /v3 = api_v3 / = public_version_api [composite:admin] use = egg:Paste#urlmap /v2.0 = admin_api /v3 = api_v3 / = admin_version_api keystone-9.0.0/etc/keystone.conf.sample0000664000567000056710000021661512701407102021272 0ustar jenkinsjenkins00000000000000[DEFAULT] # # From keystone # # A "shared secret" that can be used to bootstrap Keystone. This "token" does # not represent a user, and carries no explicit authorization. If set to # `None`, the value is ignored and the `admin_token` log in mechanism is # effectively disabled. To completely disable `admin_token` in production # (highly recommended), remove AdminTokenAuthMiddleware from your paste # application pipelines (for example, in keystone-paste.ini). (string value) #admin_token = # The base public endpoint URL for Keystone that is advertised to clients # (NOTE: this does NOT affect how Keystone listens for connections). Defaults # to the base host URL of the request. E.g. a request to # http://server:5000/v3/users will default to http://server:5000. You should # only need to set this value if the base URL contains a path (e.g. /prefix/v3) # or the endpoint should be found on a different server. (string value) #public_endpoint = # The base admin endpoint URL for Keystone that is advertised to clients (NOTE: # this does NOT affect how Keystone listens for connections). Defaults to the # base host URL of the request. E.g. 
a request to http://server:35357/v3/users # will default to http://server:35357. You should only need to set this value # if the base URL contains a path (e.g. /prefix/v3) or the endpoint should be # found on a different server. (string value) #admin_endpoint = # Maximum depth of the project hierarchy, excluding the project acting as a # domain at the top of the hierarchy. WARNING: setting it to a large value may # adversely impact performance. (integer value) #max_project_tree_depth = 5 # Limit the sizes of user & project ID/names. (integer value) #max_param_size = 64 # Similar to max_param_size, but provides an exception for token values. # (integer value) #max_token_size = 8192 # Similar to the member_role_name option, this represents the default role ID # used to associate users with their default projects in the v2 API. This will # be used as the explicit role where one is not specified by the v2 API. # (string value) #member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab # This is the role name used in combination with the member_role_id option; see # that option for more detail. (string value) #member_role_name = _member_ # The value passed as the keyword "rounds" to passlib's encrypt method. # (integer value) # Minimum value: 1000 # Maximum value: 100000 #crypt_strength = 10000 # The maximum number of entities that will be returned in a collection, with no # limit set by default. This global limit may be then overridden for a specific # driver, by specifying a list_limit in the appropriate section (e.g. # [assignment]). (integer value) #list_limit = # Set this to false if you want to enable the ability for user, group and # project entities to be moved between domains by updating their domain_id. # Allowing such movement is not recommended if the scope of a domain admin is # being restricted by use of an appropriate policy file (see # policy.v3cloudsample as an example). This ability is deprecated and will be # removed in a future release. 
(boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. #domain_id_immutable = true # If set to true, strict password length checking is performed for password # manipulation. If a password exceeds the maximum length, the operation will # fail with an HTTP 403 Forbidden error. If set to false, passwords are # automatically truncated to the maximum length. (boolean value) #strict_password_check = false # The HTTP header used to determine the scheme for the original request, even # if it was removed by an SSL terminating proxy. (string value) #secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO # If set to true the server will return information in the response that may # allow an unauthenticated or authenticated user to get more information than # normal, such as why authentication failed. This may be useful for debugging # but is insecure. (boolean value) #insecure_debug = false # # From keystone.notifications # # Default publisher_id for outgoing notifications (string value) #default_publisher_id = # Define the notification format for Identity Service events. A "basic" # notification has information about the resource being operated on. A "cadf" # notification has the same information, as well as information about the # initiator of the event. (string value) # Allowed values: basic, cadf #notification_format = basic # Define the notification options to opt-out from. The value expected is: # identity... This field can be set multiple times in # order to add more notifications to opt-out from. For example: # notification_opt_out=identity.user.created # notification_opt_out=identity.authenticate.success (multi valued) #notification_opt_out = # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of the default # INFO level. (boolean value) #debug = false # If set to false, the logging level will be set to WARNING instead of the # default INFO level. 
(boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. #verbose = true # The name of a logging configuration file. This file is appended to any # existing logging configuration files. For details about logging configuration # files, see the Python logging module documentation. Note that when logging # configuration files are used then all logging configuration is set in the # configuration file and other logging configuration options are ignored (for # example, logging_context_format_string). (string value) # Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set. (string # value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default is set, # logging will go to stderr as defined by use_stderr. This option is ignored if # log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. This option # is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # Uses logging handler designed to watch file system. When log file is moved or # removed this handler will open a new log file with specified path # instantaneously. It makes sense only if log_file option is specified and # Linux platform is used. This option is ignored if log_config_append is set. # (boolean value) #watch_log_file = false # Use syslog for logging. Existing syslog format is DEPRECATED and will be # changed later to honor RFC5424. This option is ignored if log_config_append # is set. (boolean value) #use_syslog = false # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Log output to standard error. 
This option is ignored if log_config_append is # set. (boolean value) #use_stderr = true # Format string to use for log messages with context. (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. (string # value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the message # is DEBUG. (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format. (string value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. (string value) #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is ignored # if log_config_append is set. (list value) #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false # The format for an instance that is passed with the log message. (string # value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message. 
(string # value) #instance_uuid_format = "[instance: %(uuid)s] " # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false # # From oslo.messaging # # Size of RPC connection pool. (integer value) # Deprecated group/name - [DEFAULT]/rpc_conn_pool_size #rpc_conn_pool_size = 30 # ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. # The "host" option should point or resolve to this address. (string value) #rpc_zmq_bind_address = * # MatchMaker driver. (string value) # Allowed values: redis, dummy #rpc_zmq_matchmaker = redis # Type of concurrency used. Either "native" or "eventlet" (string value) #rpc_zmq_concurrency = eventlet # Number of ZeroMQ contexts, defaults to 1. (integer value) #rpc_zmq_contexts = 1 # Maximum number of ingress messages to locally buffer per topic. Default is # unlimited. (integer value) #rpc_zmq_topic_backlog = # Directory for holding IPC sockets. (string value) #rpc_zmq_ipc_dir = /var/run/openstack # Name of this node. Must be a valid hostname, FQDN, or IP address. Must match # "host" option, if running Nova. (string value) #rpc_zmq_host = localhost # Seconds to wait before a cast expires (TTL). The default value of -1 # specifies an infinite linger period. The value of 0 specifies no linger # period. Pending messages shall be discarded immediately when the socket is # closed. Only supported by impl_zmq. (integer value) #rpc_cast_timeout = -1 # The default number of seconds that poll should wait. Poll raises timeout # exception when timeout expired. (integer value) #rpc_poll_timeout = 1 # Expiration timeout in seconds of a name service record about existing target # ( < 0 means no timeout). (integer value) #zmq_target_expire = 120 # Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean # value) #use_pub_sub = true # Minimal port number for random ports range. 
(port value) # Minimum value: 0 # Maximum value: 65535 #rpc_zmq_min_port = 49152 # Maximal port number for random ports range. (integer value) # Minimum value: 1 # Maximum value: 65536 #rpc_zmq_max_port = 65536 # Number of retries to find free port number before fail with ZMQBindError. # (integer value) #rpc_zmq_bind_port_retries = 100 # Size of executor thread pool. (integer value) # Deprecated group/name - [DEFAULT]/rpc_thread_pool_size #executor_thread_pool_size = 64 # Seconds to wait for a response from a call. (integer value) #rpc_response_timeout = 60 # A URL representing the messaging driver to use and its full configuration. If # not set, we fall back to the rpc_backend option and driver specific # configuration. (string value) #transport_url = # The messaging driver to use, defaults to rabbit. Other drivers include amqp # and zmq. (string value) #rpc_backend = rabbit # The default exchange under which topics are scoped. May be overridden by an # exchange name specified in the transport_url option. (string value) #control_exchange = keystone # # From oslo.service.service # # Enable eventlet backdoor. Acceptable values are 0, , and # :, where 0 results in listening on a random tcp port number; # results in listening on the specified port number (and not enabling # backdoor if that port is in use); and : results in listening on # the smallest unused port number within the specified range of port numbers. # The chosen port is displayed in the service's log file. (string value) #backdoor_port = # Enable eventlet backdoor, using the provided path as a unix socket that can # receive connections. This option is mutually exclusive with 'backdoor_port' # in that only one should be provided. If both are provided then the existence # of this option overrides the usage of that option. (string value) #backdoor_socket = # Enables or disables logging values of all registered options when starting a # service (at DEBUG level). 
(boolean value) #log_options = true # Specify a timeout after which a gracefully shutdown server will exit. Zero # value means endless wait. (integer value) #graceful_shutdown_timeout = 60 [assignment] # # From keystone # # Entrypoint for the assignment backend driver in the keystone.assignment # namespace. Only an SQL driver is supplied. If an assignment driver is not # specified, the identity driver will choose the assignment driver (driver # selection based on `[identity]/driver` option is deprecated and will be # removed in the "O" release). (string value) #driver = # A list of role names which are prohibited from being an implied role. (list # value) #prohibited_implied_role = admin [auth] # # From keystone # # Allowed authentication methods. (list value) #methods = external,password,token,oauth1 # Entrypoint for the password auth plugin module in the keystone.auth.password # namespace. (string value) #password = # Entrypoint for the token auth plugin module in the keystone.auth.token # namespace. (string value) #token = # Entrypoint for the external (REMOTE_USER) auth plugin module in the # keystone.auth.external namespace. Supplied drivers are DefaultDomain and # Domain. The default driver is DefaultDomain. (string value) #external = # Entrypoint for the oAuth1.0 auth plugin module in the keystone.auth.oauth1 # namespace. (string value) #oauth1 = [cache] # # From oslo.cache # # Prefix for building the configuration dictionary for the cache region. This # should not need to be changed unless there is another dogpile.cache region # with the same configuration name. (string value) #config_prefix = cache.oslo # Default TTL, in seconds, for any cached item in the dogpile.cache region. # This applies to any cached method that doesn't have an explicit cache # expiration time defined for it. (integer value) #expiration_time = 600 # Dogpile.cache backend module. 
It is recommended that Memcache with pooling # (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be used in # production deployments. Small workloads (single process) like devstack can # use the dogpile.cache.memory backend. (string value) #backend = dogpile.cache.null # Arguments supplied to the backend module. Specify this option once per # argument to be passed to the dogpile.cache backend. Example format: # ":". (multi valued) #backend_argument = # Proxy classes to import that will affect the way the dogpile.cache backend # functions. See the dogpile.cache documentation on changing-backend-behavior. # (list value) #proxies = # Global toggle for caching. (boolean value) #enabled = false # Extra debugging from the cache backend (cache keys, get/set/delete/etc # calls). This is only really useful if you need to see the specific cache- # backend get/set/delete calls with the keys/values. Typically this should be # left set to false. (boolean value) #debug_cache_backend = false # Memcache servers in the format of "host:port". (dogpile.cache.memcache and # oslo_cache.memcache_pool backends only). (list value) #memcache_servers = localhost:11211 # Number of seconds memcached server is considered dead before it is tried # again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only). # (integer value) #memcache_dead_retry = 300 # Timeout in seconds for every call to a server. (dogpile.cache.memcache and # oslo_cache.memcache_pool backends only). (integer value) #memcache_socket_timeout = 3 # Max total number of open connections to every memcached server. # (oslo_cache.memcache_pool backend only). (integer value) #memcache_pool_maxsize = 10 # Number of seconds a connection to memcached is held unused in the pool before # it is closed. (oslo_cache.memcache_pool backend only). (integer value) #memcache_pool_unused_timeout = 60 # Number of seconds that an operation will wait to get a memcache client # connection. 
(integer value) #memcache_pool_connection_get_timeout = 10 [catalog] # # From keystone # # Catalog template file name for use with the template catalog backend. (string # value) #template_file = default_catalog.templates # Entrypoint for the catalog backend driver in the keystone.catalog namespace. # Supplied drivers are kvs, sql, templated, and endpoint_filter.sql (string # value) #driver = sql # Toggle for catalog caching. This has no effect unless global caching is # enabled. (boolean value) #caching = true # Time to cache catalog data (in seconds). This has no effect unless global and # catalog caching are enabled. (integer value) #cache_time = # Maximum number of entities that will be returned in a catalog collection. # (integer value) #list_limit = [cors] # # From oslo.middleware # # Indicate whether this resource may be shared with the domain received in the # requests "origin" header. (list value) #allowed_origin = # Indicate that the actual request can include user credentials (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to HTTP Simple # Headers. (list value) #expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual request. # (list value) #allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name [cors.subdomain] # # From oslo.middleware # # Indicate whether this resource may be shared with the domain received in the # requests "origin" header. 
(list value) #allowed_origin = # Indicate that the actual request can include user credentials (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to HTTP Simple # Headers. (list value) #expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual request. # (list value) #allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name [credential] # # From keystone # # Entrypoint for the credential backend driver in the keystone.credential # namespace. (string value) #driver = sql [database] # # From oslo.db # # The file name to use with SQLite. (string value) # Deprecated group/name - [DEFAULT]/sqlite_db #sqlite_db = oslo.sqlite # If True, SQLite uses synchronous mode. (boolean value) # Deprecated group/name - [DEFAULT]/sqlite_synchronous #sqlite_synchronous = true # The back end to use for the database. (string value) # Deprecated group/name - [DEFAULT]/db_backend #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database. (string # value) # Deprecated group/name - [DEFAULT]/sql_connection # Deprecated group/name - [DATABASE]/sql_connection # Deprecated group/name - [sql]/connection #connection = # The SQLAlchemy connection string to use to connect to the slave database. # (string value) #slave_connection = # The SQL mode to be used for MySQL sessions. This option, including the # default, overrides any server-set SQL mode. To use whatever SQL mode is set # by the server configuration, set this to no value. 
Example: mysql_sql_mode= # (string value) #mysql_sql_mode = TRADITIONAL # Timeout before idle SQL connections are reaped. (integer value) # Deprecated group/name - [DEFAULT]/sql_idle_timeout # Deprecated group/name - [DATABASE]/sql_idle_timeout # Deprecated group/name - [sql]/idle_timeout #idle_timeout = 3600 # Minimum number of SQL connections to keep open in a pool. (integer value) # Deprecated group/name - [DEFAULT]/sql_min_pool_size # Deprecated group/name - [DATABASE]/sql_min_pool_size #min_pool_size = 1 # Maximum number of SQL connections to keep open in a pool. (integer value) # Deprecated group/name - [DEFAULT]/sql_max_pool_size # Deprecated group/name - [DATABASE]/sql_max_pool_size #max_pool_size = # Maximum number of database connection retries during startup. Set to -1 to # specify an infinite retry count. (integer value) # Deprecated group/name - [DEFAULT]/sql_max_retries # Deprecated group/name - [DATABASE]/sql_max_retries #max_retries = 10 # Interval between retries of opening a SQL connection. (integer value) # Deprecated group/name - [DEFAULT]/sql_retry_interval # Deprecated group/name - [DATABASE]/reconnect_interval #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy. (integer value) # Deprecated group/name - [DEFAULT]/sql_max_overflow # Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything. (integer # value) # Deprecated group/name - [DEFAULT]/sql_connection_debug #connection_debug = 0 # Add Python stack traces to SQL as comment strings. (boolean value) # Deprecated group/name - [DEFAULT]/sql_connection_trace #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy. (integer value) # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout #pool_timeout = # Enable the experimental use of database reconnect on connection lost. 
# (boolean value) #use_db_reconnect = false # Seconds between retries of a database transaction. (integer value) #db_retry_interval = 1 # If True, increases the interval between retries of a database operation up to # db_max_retry_interval. (boolean value) #db_inc_retry_interval = true # If db_inc_retry_interval is set, the maximum seconds between retries of a # database operation. (integer value) #db_max_retry_interval = 10 # Maximum retries in case of connection error or deadlock error before error is # raised. Set to -1 to specify an infinite retry count. (integer value) #db_max_retries = 20 [domain_config] # # From keystone # # Entrypoint for the domain config backend driver in the # keystone.resource.domain_config namespace. (string value) #driver = sql # Toggle for domain config caching. This has no effect unless global caching is # enabled. (boolean value) #caching = true # TTL (in seconds) to cache domain config data. This has no effect unless # domain config caching is enabled. (integer value) #cache_time = 300 [endpoint_filter] # # From keystone # # Entrypoint for the endpoint filter backend driver in the # keystone.endpoint_filter namespace. (string value) #driver = sql # Toggle to return all active endpoints if no filter exists. (boolean value) #return_all_endpoints_if_no_filter = true [endpoint_policy] # # From keystone # # Enable endpoint_policy functionality. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: The option to enable the OS-ENDPOINT-POLICY extension has been # deprecated in the M release and will be removed in the O release. The OS- # ENDPOINT-POLICY extension will be enabled by default. #enabled = true # Entrypoint for the endpoint policy backend driver in the # keystone.endpoint_policy namespace. (string value) #driver = sql [eventlet_server] # # From keystone # # The number of worker processes to serve the public eventlet application. 
# Defaults to number of CPUs (minimum of 2). (integer value) # Deprecated group/name - [DEFAULT]/public_workers # This option is deprecated for removal. # Its value may be silently ignored in the future. #public_workers = # The number of worker processes to serve the admin eventlet application. # Defaults to number of CPUs (minimum of 2). (integer value) # Deprecated group/name - [DEFAULT]/admin_workers # This option is deprecated for removal. # Its value may be silently ignored in the future. #admin_workers = # The IP address of the network interface for the public service to listen on. # (string value) # Deprecated group/name - [DEFAULT]/bind_host # Deprecated group/name - [DEFAULT]/public_bind_host # This option is deprecated for removal. # Its value may be silently ignored in the future. #public_bind_host = 0.0.0.0 # The port number which the public service listens on. (port value) # Minimum value: 0 # Maximum value: 65535 # Deprecated group/name - [DEFAULT]/public_port # This option is deprecated for removal. # Its value may be silently ignored in the future. #public_port = 5000 # The IP address of the network interface for the admin service to listen on. # (string value) # Deprecated group/name - [DEFAULT]/bind_host # Deprecated group/name - [DEFAULT]/admin_bind_host # This option is deprecated for removal. # Its value may be silently ignored in the future. #admin_bind_host = 0.0.0.0 # The port number which the admin service listens on. (port value) # Minimum value: 0 # Maximum value: 65535 # Deprecated group/name - [DEFAULT]/admin_port # This option is deprecated for removal. # Its value may be silently ignored in the future. #admin_port = 35357 # If set to false, disables keepalives on the server; all connections will be # closed after serving one request. (boolean value) #wsgi_keep_alive = true # Timeout for socket operations on a client connection. If an incoming # connection is idle for this number of seconds it will be closed. 
A value of # "0" means wait forever. (integer value) #client_socket_timeout = 900 # Set this to true if you want to enable TCP_KEEPALIVE on server sockets, i.e. # sockets used by the Keystone wsgi server for client connections. (boolean # value) # Deprecated group/name - [DEFAULT]/tcp_keepalive # This option is deprecated for removal. # Its value may be silently ignored in the future. #tcp_keepalive = false # Sets the value of TCP_KEEPIDLE in seconds for each server socket. Only # applies if tcp_keepalive is true. Ignored if system does not support it. # (integer value) # Deprecated group/name - [DEFAULT]/tcp_keepidle # This option is deprecated for removal. # Its value may be silently ignored in the future. #tcp_keepidle = 600 [eventlet_server_ssl] # # From keystone # # Toggle for SSL support on the Keystone eventlet servers. (boolean value) # Deprecated group/name - [ssl]/enable # This option is deprecated for removal. # Its value may be silently ignored in the future. #enable = false # Path of the certfile for SSL. For non-production environments, you may be # interested in using `keystone-manage ssl_setup` to generate self-signed # certificates. (string value) # Deprecated group/name - [ssl]/certfile # This option is deprecated for removal. # Its value may be silently ignored in the future. #certfile = /etc/keystone/ssl/certs/keystone.pem # Path of the keyfile for SSL. (string value) # Deprecated group/name - [ssl]/keyfile # This option is deprecated for removal. # Its value may be silently ignored in the future. #keyfile = /etc/keystone/ssl/private/keystonekey.pem # Path of the CA cert file for SSL. (string value) # Deprecated group/name - [ssl]/ca_certs # This option is deprecated for removal. # Its value may be silently ignored in the future. #ca_certs = /etc/keystone/ssl/certs/ca.pem # Require client certificate. (boolean value) # Deprecated group/name - [ssl]/cert_required # This option is deprecated for removal. 
# Its value may be silently ignored in the future. #cert_required = false [federation] # # From keystone # # Entrypoint for the federation backend driver in the keystone.federation # namespace. (string value) #driver = sql # Value to be used when filtering assertion parameters from the environment. # (string value) #assertion_prefix = # Value to be used to obtain the entity ID of the Identity Provider from the # environment (e.g. if using the mod_shib plugin this value is `Shib-Identity- # Provider`). (string value) #remote_id_attribute = # A domain name that is reserved to allow federated ephemeral users to have a # domain concept. Note that an admin will not be able to create a domain with # this name or update an existing domain to this name. You are not advised to # change this value unless you really have to. (string value) #federated_domain_name = Federated # A list of trusted dashboard hosts. Before accepting a Single Sign-On request # to return a token, the origin host must be a member of the trusted_dashboard # list. This configuration option may be repeated for multiple values. For # example: trusted_dashboard=http://acme.com/auth/websso # trusted_dashboard=http://beta.com/auth/websso (multi valued) #trusted_dashboard = # Location of Single Sign-On callback handler, will return a token to a trusted # dashboard host. (string value) #sso_callback_template = /etc/keystone/sso_callback_template.html [fernet_tokens] # # From keystone # # Directory containing Fernet token keys. (string value) #key_repository = /etc/keystone/fernet-keys/ # This controls how many keys are held in rotation by keystone-manage # fernet_rotate before they are discarded. The default value of 3 means that # keystone will maintain one staged key, one primary key, and one secondary # key. Increasing this value means that additional secondary keys will be kept # in the rotation. 
(integer value) #max_active_keys = 3 [identity] # # From keystone # # This references the domain to use for all Identity API v2 requests (which are # not aware of domains). A domain with this ID will be created for you by # keystone-manage db_sync in migration 008. The domain referenced by this ID # cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API. # There is nothing special about this domain, other than the fact that it must # exist in order to maintain support for your v2 clients. (string value) #default_domain_id = default # A subset (or all) of domains can have their own identity driver, each with # their own partial configuration options, stored in either the resource # backend or in a file in a domain configuration directory (depending on the # setting of domain_configurations_from_database). Only values specific to the # domain need to be specified in this manner. This feature is disabled by # default; set to true to enable. (boolean value) #domain_specific_drivers_enabled = false # Extract the domain specific configuration options from the resource backend # where they have been stored with the domain data. This feature is disabled by # default (in which case the domain specific options will be loaded from files # in the domain configuration directory); set to true to enable. (boolean # value) #domain_configurations_from_database = false # Path for Keystone to locate the domain specific identity configuration files # if domain_specific_drivers_enabled is set to true. (string value) #domain_config_dir = /etc/keystone/domains # Entrypoint for the identity backend driver in the keystone.identity # namespace. Supplied drivers are ldap and sql. (string value) #driver = sql # Toggle for identity caching. This has no effect unless global caching is # enabled. (boolean value) #caching = true # Time to cache identity data (in seconds). This has no effect unless global # and identity caching are enabled. 
(integer value) #cache_time = 600 # Maximum supported length for user passwords; decrease to improve performance. # (integer value) # Maximum value: 4096 #max_password_length = 4096 # Maximum number of entities that will be returned in an identity collection. # (integer value) #list_limit = [identity_mapping] # # From keystone # # Entrypoint for the identity mapping backend driver in the # keystone.identity.id_mapping namespace. (string value) #driver = sql # Entrypoint for the public ID generator for user and group entities in the # keystone.identity.id_generator namespace. The Keystone identity mapper only # supports generators that produce no more than 64 characters. (string value) #generator = sha256 # The format of user and group IDs changed in Juno for backends that do not # generate UUIDs (e.g. LDAP), with keystone providing a hash mapping to the # underlying attribute in LDAP. By default this mapping is disabled, which # ensures that existing IDs will not change. Even when the mapping is enabled # by using domain specific drivers, any users and groups from the default # domain being handled by LDAP will still not be mapped to ensure their IDs # remain backward compatible. Setting this value to False will enable the # mapping for even the default LDAP driver. It is only safe to do this if you # do not already have assignments for users and groups from the default LDAP # domain, and it is acceptable for Keystone to provide the different IDs to # clients than it did previously. Typically this means that the only time you # can set this value to False is when configuring a fresh installation. # (boolean value) #backward_compatible_ids = true [kvs] # # From keystone # # Extra dogpile.cache backend modules to register with the dogpile.cache # library. (list value) #backends = # Prefix for building the configuration dictionary for the KVS region. This # should not need to be changed unless there is another dogpile.cache region # with the same configuration name. 
(string value) #config_prefix = keystone.kvs # Toggle to disable using a key-mangling function to ensure fixed length keys. # This is toggle-able for debugging purposes, it is highly recommended to # always leave this set to true. (boolean value) #enable_key_mangler = true # Default lock timeout (in seconds) for distributed locking. (integer value) #default_lock_timeout = 5 [ldap] # # From keystone # # URL(s) for connecting to the LDAP server. Multiple LDAP URLs may be specified # as a comma separated string. The first URL to successfully bind is used for # the connection. (string value) #url = ldap://localhost # User BindDN to query the LDAP server. (string value) #user = # Password for the BindDN to query the LDAP server. (string value) #password = # LDAP server suffix (string value) #suffix = cn=example,cn=com # If true, will add a dummy member to groups. This is required if the # objectclass for groups requires the "member" attribute. (boolean value) #use_dumb_member = false # DN of the "dummy member" to use when "use_dumb_member" is enabled. (string # value) #dumb_member = cn=dumb,dc=nonexistent # Delete subtrees using the subtree delete control. Only enable this option if # your LDAP server supports subtree deletion. (boolean value) #allow_subtree_delete = false # The LDAP scope for queries, "one" represents oneLevel/singleLevel and "sub" # represents subtree/wholeSubtree options. (string value) # Allowed values: one, sub #query_scope = one # Maximum results per page; a value of zero ("0") disables paging. (integer # value) #page_size = 0 # The LDAP dereferencing option for queries. The "default" option falls back to # using default dereferencing configured by your ldap.conf. (string value) # Allowed values: never, searching, always, finding, default #alias_dereferencing = default # Sets the LDAP debugging level for LDAP calls. A value of 0 means that # debugging is not enabled. This value is a bitmask, consult your LDAP # documentation for possible values. 
(integer value) #debug_level = # Override the system's default referral chasing behavior for queries. (boolean # value) #chase_referrals = # Search base for users. Defaults to the suffix value. (string value) #user_tree_dn = # LDAP search filter for users. (string value) #user_filter = # LDAP objectclass for users. (string value) #user_objectclass = inetOrgPerson # LDAP attribute mapped to user id. WARNING: must not be a multivalued # attribute. (string value) #user_id_attribute = cn # LDAP attribute mapped to user name. (string value) #user_name_attribute = sn # LDAP attribute mapped to user description. (string value) #user_description_attribute = description # LDAP attribute mapped to user email. (string value) #user_mail_attribute = mail # LDAP attribute mapped to password. (string value) #user_pass_attribute = userPassword # LDAP attribute mapped to user enabled flag. (string value) #user_enabled_attribute = enabled # Invert the meaning of the boolean enabled values. Some LDAP servers use a # boolean lock attribute where "true" means an account is disabled. Setting # "user_enabled_invert = true" will allow these lock attributes to be used. # This setting will have no effect if "user_enabled_mask" or # "user_enabled_emulation" settings are in use. (boolean value) #user_enabled_invert = false # Bitmask integer to indicate the bit that the enabled value is stored in if # the LDAP server represents "enabled" as a bit on an integer rather than a # boolean. A value of "0" indicates the mask is not used. If this is not set to # "0" the typical value is "2". This is typically used when # "user_enabled_attribute = userAccountControl". (integer value) #user_enabled_mask = 0 # Default value to enable users. This should match an appropriate int value if # the LDAP server uses non-boolean (bitmask) values to indicate if a user is # enabled or disabled. If this is not set to "True" the typical value is "512". 
# This is typically used when "user_enabled_attribute = userAccountControl". # (string value) #user_enabled_default = True # List of attributes stripped off the user on update. (list value) #user_attribute_ignore = default_project_id # LDAP attribute mapped to default_project_id for users. (string value) #user_default_project_id_attribute = # Allow user creation in LDAP backend. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Write support for Identity LDAP backends has been deprecated in the M # release and will be removed in the O release. #user_allow_create = true # Allow user updates in LDAP backend. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Write support for Identity LDAP backends has been deprecated in the M # release and will be removed in the O release. #user_allow_update = true # Allow user deletion in LDAP backend. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Write support for Identity LDAP backends has been deprecated in the M # release and will be removed in the O release. #user_allow_delete = true # If true, Keystone uses an alternative method to determine if a user is # enabled or not by checking if they are a member of the # "user_enabled_emulation_dn" group. (boolean value) #user_enabled_emulation = false # DN of the group entry to hold enabled users when using enabled emulation. # (string value) #user_enabled_emulation_dn = # Use the "group_member_attribute" and "group_objectclass" settings to # determine membership in the emulated enabled group. (boolean value) #user_enabled_emulation_use_group_config = false # List of additional LDAP attributes used for mapping additional attribute # mappings for users. Attribute mapping format is <ldap_attr>:<user_attr>, # where ldap_attr is the attribute in the LDAP entry and user_attr is the # Identity API attribute. 
(list value) #user_additional_attribute_mapping = # Search base for groups. Defaults to the suffix value. (string value) #group_tree_dn = # LDAP search filter for groups. (string value) #group_filter = # LDAP objectclass for groups. (string value) #group_objectclass = groupOfNames # LDAP attribute mapped to group id. (string value) #group_id_attribute = cn # LDAP attribute mapped to group name. (string value) #group_name_attribute = ou # LDAP attribute mapped to show group membership. (string value) #group_member_attribute = member # LDAP attribute mapped to group description. (string value) #group_desc_attribute = description # List of attributes stripped off the group on update. (list value) #group_attribute_ignore = # Allow group creation in LDAP backend. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Write support for Identity LDAP backends has been deprecated in the M # release and will be removed in the O release. #group_allow_create = true # Allow group update in LDAP backend. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Write support for Identity LDAP backends has been deprecated in the M # release and will be removed in the O release. #group_allow_update = true # Allow group deletion in LDAP backend. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Write support for Identity LDAP backends has been deprecated in the M # release and will be removed in the O release. #group_allow_delete = true # Additional attribute mappings for groups. Attribute mapping format is # <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry # and user_attr is the Identity API attribute. (list value) #group_additional_attribute_mapping = # CA certificate file path for communicating with LDAP servers. 
(string value) #tls_cacertfile = # CA certificate directory path for communicating with LDAP servers. (string # value) #tls_cacertdir = # Enable TLS for communicating with LDAP servers. (boolean value) #use_tls = false # Specifies what checks to perform on client certificates in an incoming TLS # session. (string value) # Allowed values: demand, never, allow #tls_req_cert = demand # Enable LDAP connection pooling. (boolean value) #use_pool = true # Connection pool size. (integer value) #pool_size = 10 # Maximum count of reconnect trials. (integer value) #pool_retry_max = 3 # Time span in seconds to wait between two reconnect trials. (floating point # value) #pool_retry_delay = 0.1 # Connector timeout in seconds. Value -1 indicates indefinite wait for # response. (integer value) #pool_connection_timeout = -1 # Connection lifetime in seconds. (integer value) #pool_connection_lifetime = 600 # Enable LDAP connection pooling for end user authentication. If use_pool is # disabled, then this setting is meaningless and is not used at all. (boolean # value) #use_auth_pool = true # End user auth connection pool size. (integer value) #auth_pool_size = 100 # End user auth connection lifetime in seconds. (integer value) #auth_pool_connection_lifetime = 60 # If the members of the group objectclass are user IDs rather than DNs, set # this to true. This is the case when using posixGroup as the group objectclass # and OpenDirectory. (boolean value) #group_members_are_ids = false [matchmaker_redis] # # From oslo.messaging # # Host to locate redis. (string value) #host = 127.0.0.1 # Use this port to connect to redis host. (port value) # Minimum value: 0 # Maximum value: 65535 #port = 6379 # Password for Redis server (optional). (string value) #password = # List of Redis Sentinel hosts (fault tolerance mode) e.g. # [host:port, host1:port ... ] (list value) #sentinel_hosts = # Redis replica set name. 
(string value) #sentinel_group_name = oslo-messaging-zeromq # Time in ms to wait between connection attempts. (integer value) #wait_timeout = 500 # Time in ms to wait before the transaction is killed. (integer value) #check_timeout = 20000 # Timeout in ms on blocking socket operations (integer value) #socket_timeout = 1000 [memcache] # # From keystone # # Memcache servers in the format of "host:port". (list value) #servers = localhost:11211 # Number of seconds memcached server is considered dead before it is tried # again. This is used by the key value store system (e.g. token pooled # memcached persistence backend). (integer value) #dead_retry = 300 # Timeout in seconds for every call to a server. This is used by the key value # store system (e.g. token pooled memcached persistence backend). (integer # value) #socket_timeout = 3 # Max total number of open connections to every memcached server. This is used # by the key value store system (e.g. token pooled memcached persistence # backend). (integer value) #pool_maxsize = 10 # Number of seconds a connection to memcached is held unused in the pool before # it is closed. This is used by the key value store system (e.g. token pooled # memcached persistence backend). (integer value) #pool_unused_timeout = 60 # Number of seconds that an operation will wait to get a memcache client # connection. This is used by the key value store system (e.g. token pooled # memcached persistence backend). (integer value) #pool_connection_get_timeout = 10 [oauth1] # # From keystone # # Entrypoint for the OAuth backend driver in the keystone.oauth1 namespace. # (string value) #driver = sql # Duration (in seconds) for the OAuth Request Token. (integer value) #request_token_duration = 28800 # Duration (in seconds) for the OAuth Access Token. 
(integer value) #access_token_duration = 86400 [os_inherit] # # From keystone # # role-assignment inheritance to projects from owning domain or from projects # higher in the hierarchy can be optionally disabled. In the future, this # option will be removed and the hierarchy will be always enabled. (boolean # value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: The option to enable the OS-INHERIT extension has been deprecated in # the M release and will be removed in the O release. The OS-INHERIT extension # will be enabled by default. #enabled = true [oslo_messaging_amqp] # # From oslo.messaging # # address prefix used when sending to a specific server (string value) # Deprecated group/name - [amqp1]/server_request_prefix #server_request_prefix = exclusive # address prefix used when broadcasting to all servers (string value) # Deprecated group/name - [amqp1]/broadcast_prefix #broadcast_prefix = broadcast # address prefix when sending to any server in group (string value) # Deprecated group/name - [amqp1]/group_request_prefix #group_request_prefix = unicast # Name for the AMQP container (string value) # Deprecated group/name - [amqp1]/container_name #container_name = # Timeout for inactive connections (in seconds) (integer value) # Deprecated group/name - [amqp1]/idle_timeout #idle_timeout = 0 # Debug: dump AMQP frames to stdout (boolean value) # Deprecated group/name - [amqp1]/trace #trace = false # CA certificate PEM file to verify server certificate (string value) # Deprecated group/name - [amqp1]/ssl_ca_file #ssl_ca_file = # Identifying certificate PEM file to present to clients (string value) # Deprecated group/name - [amqp1]/ssl_cert_file #ssl_cert_file = # Private key PEM file used to sign cert_file certificate (string value) # Deprecated group/name - [amqp1]/ssl_key_file #ssl_key_file = # Password for decrypting ssl_key_file (if encrypted) (string value) # Deprecated group/name - 
[amqp1]/ssl_key_password #ssl_key_password = # Accept clients using either SSL or plain TCP (boolean value) # Deprecated group/name - [amqp1]/allow_insecure_clients #allow_insecure_clients = false # Space separated list of acceptable SASL mechanisms (string value) # Deprecated group/name - [amqp1]/sasl_mechanisms #sasl_mechanisms = # Path to directory that contains the SASL configuration (string value) # Deprecated group/name - [amqp1]/sasl_config_dir #sasl_config_dir = # Name of configuration file (without .conf suffix) (string value) # Deprecated group/name - [amqp1]/sasl_config_name #sasl_config_name = # User name for message broker authentication (string value) # Deprecated group/name - [amqp1]/username #username = # Password for message broker authentication (string value) # Deprecated group/name - [amqp1]/password #password = [oslo_messaging_notifications] # # From oslo.messaging # # The Drivers(s) to handle sending notifications. Possible values are # messaging, messagingv2, routing, log, test, noop (multi valued) # Deprecated group/name - [DEFAULT]/notification_driver #driver = # A URL representing the messaging driver to use for notifications. If not set, # we fall back to the same configuration used for RPC. (string value) # Deprecated group/name - [DEFAULT]/notification_transport_url #transport_url = # AMQP topic used for OpenStack notifications. (list value) # Deprecated group/name - [rpc_notifier2]/topics # Deprecated group/name - [DEFAULT]/notification_topics #topics = notifications [oslo_messaging_rabbit] # # From oslo.messaging # # Use durable queues in AMQP. (boolean value) # Deprecated group/name - [DEFAULT]/amqp_durable_queues # Deprecated group/name - [DEFAULT]/rabbit_durable_queues #amqp_durable_queues = false # Auto-delete queues in AMQP. (boolean value) # Deprecated group/name - [DEFAULT]/amqp_auto_delete #amqp_auto_delete = false # SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and # SSLv23. 
SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some # distributions. (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_version #kombu_ssl_version = # SSL key file (valid only if SSL enabled). (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile #kombu_ssl_keyfile = # SSL cert file (valid only if SSL enabled). (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_certfile #kombu_ssl_certfile = # SSL certification authority file (valid only if SSL enabled). (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs #kombu_ssl_ca_certs = # How long to wait before reconnecting in response to an AMQP consumer cancel # notification. (floating point value) # Deprecated group/name - [DEFAULT]/kombu_reconnect_delay #kombu_reconnect_delay = 1.0 # EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not # be used. This option may not be available in future versions. (string value) #kombu_compression = # How long to wait for a missing client before abandoning sending it its replies. # This value should not be longer than rpc_response_timeout. (integer value) # Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout #kombu_missing_consumer_retry_timeout = 60 # Determines how the next RabbitMQ node is chosen in case the one we are # currently connected to becomes unavailable. Takes effect only if more than # one RabbitMQ node is provided in config. (string value) # Allowed values: round-robin, shuffle #kombu_failover_strategy = round-robin # The RabbitMQ broker address where a single node is used. (string value) # Deprecated group/name - [DEFAULT]/rabbit_host #rabbit_host = localhost # The RabbitMQ broker port where a single node is used. (port value) # Minimum value: 0 # Maximum value: 65535 # Deprecated group/name - [DEFAULT]/rabbit_port #rabbit_port = 5672 # RabbitMQ HA cluster host:port pairs. 
(list value) # Deprecated group/name - [DEFAULT]/rabbit_hosts #rabbit_hosts = $rabbit_host:$rabbit_port # Connect over SSL for RabbitMQ. (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_use_ssl #rabbit_use_ssl = false # The RabbitMQ userid. (string value) # Deprecated group/name - [DEFAULT]/rabbit_userid #rabbit_userid = guest # The RabbitMQ password. (string value) # Deprecated group/name - [DEFAULT]/rabbit_password #rabbit_password = guest # The RabbitMQ login method. (string value) # Deprecated group/name - [DEFAULT]/rabbit_login_method #rabbit_login_method = AMQPLAIN # The RabbitMQ virtual host. (string value) # Deprecated group/name - [DEFAULT]/rabbit_virtual_host #rabbit_virtual_host = / # How frequently to retry connecting with RabbitMQ. (integer value) #rabbit_retry_interval = 1 # How long to backoff for between retries when connecting to RabbitMQ. (integer # value) # Deprecated group/name - [DEFAULT]/rabbit_retry_backoff #rabbit_retry_backoff = 2 # Maximum interval of RabbitMQ connection retries. Default is 30 seconds. # (integer value) #rabbit_interval_max = 30 # Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry # count). (integer value) # Deprecated group/name - [DEFAULT]/rabbit_max_retries #rabbit_max_retries = 0 # Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this # option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring # is no longer controlled by the x-ha-policy argument when declaring a queue. # If you just want to make sure that all queues (except those with auto- # generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy # HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_ha_queues #rabbit_ha_queues = false # Positive integer representing duration in seconds for queue TTL (x-expires). # Queues which are unused for the duration of the TTL are automatically # deleted. 
The parameter affects only reply and fanout queues. (integer value) # Minimum value: 1 #rabbit_transient_queues_ttl = 1800 # Specifies the number of messages to prefetch. Setting to zero allows # unlimited messages. (integer value) #rabbit_qos_prefetch_count = 0 # Number of seconds after which the Rabbit broker is considered down if # heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL (integer # value) #heartbeat_timeout_threshold = 60 # How many times during the heartbeat_timeout_threshold we check the # heartbeat. (integer value) #heartbeat_rate = 2 # Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value) # Deprecated group/name - [DEFAULT]/fake_rabbit #fake_rabbit = false # Maximum number of channels to allow (integer value) #channel_max = # The maximum byte size for an AMQP frame (integer value) #frame_max = # How often to send heartbeats for consumer's connections (integer value) #heartbeat_interval = 1 # Enable SSL (boolean value) #ssl = # Arguments passed to ssl.wrap_socket (dict value) #ssl_options = # Set socket timeout in seconds for connection's socket (floating point value) #socket_timeout = 0.25 # Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point # value) #tcp_user_timeout = 0.25 # Set delay for reconnection to some host which has connection error (floating # point value) #host_connection_reconnect_delay = 0.25 # Maximum number of connections to keep queued. (integer value) #pool_max_size = 10 # Maximum number of connections to create above `pool_max_size`. (integer # value) #pool_max_overflow = 0 # Default number of seconds to wait for a connection to become available (integer # value) #pool_timeout = 30 # Lifetime of a connection (since creation) in seconds or None for no # recycling. Expired connections are closed on acquire. (integer value) #pool_recycle = 600 # Threshold at which inactive (since release) connections are considered stale # in seconds or None for no staleness. 
Stale connections are closed on acquire. # (integer value) #pool_stale = 60 # Persist notification messages. (boolean value) #notification_persistence = false # Exchange name for sending notifications (string value) #default_notification_exchange = ${control_exchange}_notification # Max number of not acknowledged messages which RabbitMQ can send to # notification listener. (integer value) #notification_listener_prefetch_count = 100 # Reconnecting retry count in case of connectivity problem during sending # notification, -1 means infinite retry. (integer value) #default_notification_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during sending # notification message (floating point value) #notification_retry_delay = 0.25 # Time to live for rpc queues without consumers in seconds. (integer value) #rpc_queue_expiration = 60 # Exchange name for sending RPC messages (string value) #default_rpc_exchange = ${control_exchange}_rpc # Exchange name for receiving RPC replies (string value) #rpc_reply_exchange = ${control_exchange}_rpc_reply # Max number of not acknowledged messages which RabbitMQ can send to rpc # listener. (integer value) #rpc_listener_prefetch_count = 100 # Max number of not acknowledged messages which RabbitMQ can send to rpc reply # listener. (integer value) #rpc_reply_listener_prefetch_count = 100 # Reconnecting retry count in case of connectivity problem during sending # reply. -1 means infinite retry during rpc_timeout (integer value) #rpc_reply_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during sending # reply. (floating point value) #rpc_reply_retry_delay = 0.25 # Reconnecting retry count in case of connectivity problem during sending RPC # message, -1 means infinite retry. 
If actual retry attempts is not 0 the rpc # request could be processed more than one time (integer value) #default_rpc_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during sending RPC # message (floating point value) #rpc_retry_delay = 0.25 [oslo_middleware] # # From oslo.middleware # # The maximum body size for each request, in bytes. (integer value) # Deprecated group/name - [DEFAULT]/osapi_max_request_body_size # Deprecated group/name - [DEFAULT]/max_request_body_size #max_request_body_size = 114688 # The HTTP Header that will be used to determine what the original request # protocol scheme was, even if it was hidden by an SSL termination proxy. # (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. #secure_proxy_ssl_header = X-Forwarded-Proto [oslo_policy] # # From oslo.policy # # The JSON file that defines policies. (string value) # Deprecated group/name - [DEFAULT]/policy_file #policy_file = policy.json # Default rule. Enforced when a requested rule is not found. (string value) # Deprecated group/name - [DEFAULT]/policy_default_rule #policy_default_rule = default # Directories where policy configuration files are stored. They can be relative # to any directory in the search path defined by the config_dir option, or # absolute paths. The file defined by policy_file must exist for these # directories to be searched. Missing or empty directories are ignored. (multi # valued) # Deprecated group/name - [DEFAULT]/policy_dirs #policy_dirs = policy.d [paste_deploy] # # From keystone # # Name of the paste configuration file that defines the available pipelines. # (string value) #config_file = keystone-paste.ini [policy] # # From keystone # # Entrypoint for the policy backend driver in the keystone.policy namespace. # Supplied drivers are rules and sql. (string value) #driver = sql # Maximum number of entities that will be returned in a policy collection. 
# (integer value) #list_limit = [resource] # # From keystone # # Entrypoint for the resource backend driver in the keystone.resource # namespace. Only an SQL driver is supplied. If a resource driver is not # specified, the assignment driver will choose the resource driver. (string # value) #driver = # Toggle for resource caching. This has no effect unless global caching is # enabled. (boolean value) # Deprecated group/name - [assignment]/caching #caching = true # TTL (in seconds) to cache resource data. This has no effect unless global # caching is enabled. (integer value) # Deprecated group/name - [assignment]/cache_time #cache_time = # Maximum number of entities that will be returned in a resource collection. # (integer value) # Deprecated group/name - [assignment]/list_limit #list_limit = # Name of the domain that owns the `admin_project_name`. Defaults to None. # (string value) #admin_project_domain_name = # Special project for performing administrative operations on remote services. # Tokens scoped to this project will contain the key/value # `is_admin_project=true`. Defaults to None. (string value) #admin_project_name = # Whether the names of projects are restricted from containing url reserved # characters. If set to new, attempts to create or update a project with a url # unsafe name will return an error. In addition, if set to strict, attempts to # scope a token using an unsafe project name will return an error. (string # value) # Allowed values: off, new, strict #project_name_url_safe = off # Whether the names of domains are restricted from containing url reserved # characters. If set to new, attempts to create or update a domain with a url # unsafe name will return an error. In addition, if set to strict, attempts to # scope a token using a domain name which is unsafe will return an error. 
# (string value) # Allowed values: off, new, strict #domain_name_url_safe = off [revoke] # # From keystone # # Entrypoint for an implementation of the backend for persisting revocation # events in the keystone.revoke namespace. Supplied drivers are kvs and sql. # (string value) #driver = sql # This value (calculated in seconds) is added to token expiration before a # revocation event may be removed from the backend. (integer value) #expiration_buffer = 1800 # Toggle for revocation event caching. This has no effect unless global caching # is enabled. (boolean value) #caching = true # Time to cache the revocation list and the revocation events (in seconds). # This has no effect unless global and token caching are enabled. (integer # value) # Deprecated group/name - [token]/revocation_cache_time #cache_time = 3600 [role] # # From keystone # # Entrypoint for the role backend driver in the keystone.role namespace. # Supplied drivers are ldap and sql. (string value) #driver = # Toggle for role caching. This has no effect unless global caching is enabled. # (boolean value) #caching = true # TTL (in seconds) to cache role data. This has no effect unless global caching # is enabled. (integer value) #cache_time = # Maximum number of entities that will be returned in a role collection. # (integer value) #list_limit = [saml] # # From keystone # # Default TTL, in seconds, for any generated SAML assertion created by # Keystone. (integer value) #assertion_expiration_time = 3600 # Binary to be called for XML signing. Install the appropriate package, specify # absolute path or adjust your PATH environment variable if the binary cannot # be found. (string value) #xmlsec1_binary = xmlsec1 # Path of the certfile for SAML signing. For non-production environments, you # may be interested in using `keystone-manage pki_setup` to generate self- # signed certificates. Note, the path cannot contain a comma. 
(string value) #certfile = /etc/keystone/ssl/certs/signing_cert.pem # Path of the keyfile for SAML signing. Note, the path cannot contain a comma. # (string value) #keyfile = /etc/keystone/ssl/private/signing_key.pem # Entity ID value for unique Identity Provider identification. Usually FQDN is # set with a suffix. A value is required to generate IDP Metadata. For example: # https://keystone.example.com/v3/OS-FEDERATION/saml2/idp (string value) #idp_entity_id = # Identity Provider Single-Sign-On service value, required in the Identity # Provider's metadata. A value is required to generate IDP Metadata. For # example: https://keystone.example.com/v3/OS-FEDERATION/saml2/sso (string # value) #idp_sso_endpoint = # Language used by the organization. (string value) #idp_lang = en # Organization name the installation belongs to. (string value) #idp_organization_name = # Organization name to be displayed. (string value) #idp_organization_display_name = # URL of the organization. (string value) #idp_organization_url = # Company of contact person. (string value) #idp_contact_company = # Given name of contact person (string value) #idp_contact_name = # Surname of contact person. (string value) #idp_contact_surname = # Email address of contact person. (string value) #idp_contact_email = # Telephone number of contact person. (string value) #idp_contact_telephone = # The contact type describing the main point of contact for the identity # provider. (string value) # Allowed values: technical, support, administrative, billing, other #idp_contact_type = other # Path to the Identity Provider Metadata file. This file should be generated # with the keystone-manage saml_idp_metadata command. (string value) #idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml # The prefix to use for the RelayState SAML attribute, used when generating ECP # wrapped assertions. 
(string value) #relay_state_prefix = ss:mem: [shadow_users] # # From keystone # # Entrypoint for the shadow users backend driver in the # keystone.identity.shadow_users namespace. (string value) #driver = sql [signing] # # From keystone # # Path of the certfile for token signing. For non-production environments, you # may be interested in using `keystone-manage pki_setup` to generate self- # signed certificates. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: PKI token support has been deprecated in the M release and will be # removed in the O release. Fernet or UUID tokens are recommended. #certfile = /etc/keystone/ssl/certs/signing_cert.pem # Path of the keyfile for token signing. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: PKI token support has been deprecated in the M release and will be # removed in the O release. Fernet or UUID tokens are recommended. #keyfile = /etc/keystone/ssl/private/signing_key.pem # Path of the CA for token signing. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: PKI token support has been deprecated in the M release and will be # removed in the O release. Fernet or UUID tokens are recommended. #ca_certs = /etc/keystone/ssl/certs/ca.pem # Path of the CA key for token signing. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: PKI token support has been deprecated in the M release and will be # removed in the O release. Fernet or UUID tokens are recommended. #ca_key = /etc/keystone/ssl/private/cakey.pem # Key size (in bits) for token signing cert (auto generated certificate). # (integer value) # Minimum value: 1024 # This option is deprecated for removal. # Its value may be silently ignored in the future. 
# Reason: PKI token support has been deprecated in the M release and will be # removed in the O release. Fernet or UUID tokens are recommended. #key_size = 2048 # Days the token signing cert is valid for (auto generated certificate). # (integer value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: PKI token support has been deprecated in the M release and will be # removed in the O release. Fernet or UUID tokens are recommended. #valid_days = 3650 # Certificate subject (auto generated certificate) for token signing. (string # value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: PKI token support has been deprecated in the M release and will be # removed in the O release. Fernet or UUID tokens are recommended. #cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com [ssl] # # From keystone # # Path of the CA key file for SSL. (string value) #ca_key = /etc/keystone/ssl/private/cakey.pem # SSL key length (in bits) (auto generated certificate). (integer value) # Minimum value: 1024 #key_size = 1024 # Days the certificate is valid for once signed (auto generated certificate). # (integer value) #valid_days = 3650 # SSL certificate subject (auto generated certificate). (string value) #cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=localhost [token] # # From keystone # # External auth mechanisms that should add bind information to token, e.g., # kerberos,x509. (list value) #bind = # Enforcement policy on tokens presented to Keystone with bind information. One # of disabled, permissive, strict, required or a specifically required bind # mode, e.g., kerberos or x509 to require binding to that authentication. # (string value) #enforce_token_bind = permissive # Amount of time a token should remain valid (in seconds). (integer value) #expiration = 3600 # Controls the token construction, validation, and revocation operations. 
# Entrypoint in the keystone.token.provider namespace. Core providers are # [fernet|pkiz|pki|uuid]. (string value) #provider = uuid # Entrypoint for the token persistence backend driver in the # keystone.token.persistence namespace. Supplied drivers are kvs, memcache, # memcache_pool, and sql. (string value) #driver = sql # Toggle for token system caching. This has no effect unless global caching is # enabled. (boolean value) #caching = true # Time to cache tokens (in seconds). This has no effect unless global and token # caching are enabled. (integer value) #cache_time = # Revoke token by token identifier. Setting revoke_by_id to true enables # various forms of enumerating tokens, e.g. `list tokens for user`. These # enumerations are processed to determine the list of tokens to revoke. Only # disable if you are switching to using the Revoke extension with a backend # other than KVS, which stores events in memory. (boolean value) #revoke_by_id = true # Allow rescoping of scoped token. Setting allow_rescoped_scoped_token to false # prevents a user from exchanging a scoped token for any other token. (boolean # value) #allow_rescope_scoped_token = true # The hash algorithm to use for PKI tokens. This can be set to any algorithm # that hashlib supports. WARNING: Before changing this value, the auth_token # middleware must be configured with the hash_algorithms, otherwise token # revocation will not be processed correctly. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: PKI token support has been deprecated in the M release and will be # removed in the O release. Fernet or UUID tokens are recommended. #hash_algorithm = md5 # Add roles to token that are not explicitly added, but that are linked # implicitly to other roles. 
(boolean value) #infer_roles = true [tokenless_auth] # # From keystone # # The list of trusted issuers to further filter the certificates that are # allowed to participate in the X.509 tokenless authorization. If the option is # absent then no certificates will be allowed. The naming format for the # attributes of a Distinguished Name(DN) must be separated by a comma and # contain no spaces. This configuration option may be repeated for multiple # values. For example: trusted_issuer=CN=john,OU=keystone,O=openstack # trusted_issuer=CN=mary,OU=eng,O=abc (multi valued) #trusted_issuer = # The protocol name for the X.509 tokenless authorization along with the option # issuer_attribute below can look up its corresponding mapping. (string value) #protocol = x509 # The issuer attribute that is served as an IdP ID for the X.509 tokenless # authorization along with the protocol to look up its corresponding mapping. # It is the environment variable in the WSGI environment that references to the # issuer of the client certificate. (string value) #issuer_attribute = SSL_CLIENT_I_DN [trust] # # From keystone # # Delegation and impersonation features can be optionally disabled. (boolean # value) #enabled = true # Enable redelegation feature. (boolean value) #allow_redelegation = false # Maximum depth of trust redelegation. (integer value) #max_redelegation_count = 3 # Entrypoint for the trust backend driver in the keystone.trust namespace. 
# (string value) #driver = sql keystone-9.0.0/babel.cfg0000664000567000056710000000002012701407102016232 0ustar jenkinsjenkins00000000000000[python: **.py] keystone-9.0.0/.coveragerc0000664000567000056710000000013512701407102016634 0ustar jenkinsjenkins00000000000000[run] branch = True source = keystone omit = keystone/tests/* [report] ignore_errors = True keystone-9.0.0/config-generator/0000775000567000056710000000000012701407246017756 5ustar jenkinsjenkins00000000000000keystone-9.0.0/config-generator/keystone.conf0000664000567000056710000000066212701407102022461 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/keystone.conf.sample wrap_width = 79 namespace = keystone namespace = keystone.notifications namespace = oslo.cache namespace = oslo.log namespace = oslo.messaging namespace = oslo.policy namespace = oslo.db namespace = oslo.middleware namespace = oslo.service.service # We don't use oslo.concurrency config options in # keystone now, just in case it slips through unnoticed. 
#namespace = oslo.concurrency keystone-9.0.0/setup.cfg0000664000567000056710000001425012701407246016350 0ustar jenkinsjenkins00000000000000[metadata] name = keystone summary = OpenStack Identity description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://docs.openstack.org/developer/keystone/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 [files] packages = keystone [extras] ldap = python-ldap>=2.4:python_version=='2.7' # PSF ldappool>=1.0:python_version=='2.7' # MPL memcache = python-memcached>=1.56 # PSF mongodb = pymongo!=3.1,>=3.0.2 # Apache-2.0 bandit = bandit>=0.17.3 # Apache-2.0 [global] setup-hooks = pbr.hooks.setup_hook [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [compile_catalog] directory = keystone/locale domain = keystone [update_catalog] domain = keystone output_dir = keystone/locale input_file = keystone/locale/keystone.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = keystone/locale/keystone.pot copyright_holder = OpenStack Foundation msgid_bugs_address = https://bugs.launchpad.net/keystone [pbr] warnerrors = True autodoc_tree_index_modules = True [entry_points] console_scripts = keystone-all = keystone.cmd.all:main keystone-manage = keystone.cmd.manage:main wsgi_scripts = keystone-wsgi-admin = keystone.server.wsgi:initialize_admin_application keystone-wsgi-public = keystone.server.wsgi:initialize_public_application keystone.assignment = sql = keystone.assignment.backends.sql:Assignment keystone.auth.external = default = keystone.auth.plugins.external:DefaultDomain DefaultDomain = 
keystone.auth.plugins.external:DefaultDomain Domain = keystone.auth.plugins.external:Domain keystone.auth.kerberos = default = keystone.auth.plugins.external:KerberosDomain keystone.auth.oauth1 = default = keystone.auth.plugins.oauth1:OAuth keystone.auth.openid = default = keystone.auth.plugins.mapped:Mapped keystone.auth.password = default = keystone.auth.plugins.password:Password keystone.auth.saml2 = default = keystone.auth.plugins.mapped:Mapped keystone.auth.token = default = keystone.auth.plugins.token:Token keystone.auth.totp = default = keystone.auth.plugins.totp:TOTP keystone.auth.x509 = default = keystone.auth.plugins.mapped:Mapped keystone.catalog = sql = keystone.catalog.backends.sql:Catalog templated = keystone.catalog.backends.templated:Catalog endpoint_filter.sql = keystone.contrib.endpoint_filter.backends.catalog_sql:EndpointFilterCatalog keystone.credential = sql = keystone.credential.backends.sql:Credential keystone.identity = ldap = keystone.identity.backends.ldap:Identity sql = keystone.identity.backends.sql:Identity keystone.identity.id_generator = sha256 = keystone.identity.id_generators.sha256:Generator keystone.identity.id_mapping = sql = keystone.identity.mapping_backends.sql:Mapping keystone.identity.shadow_users = sql = keystone.identity.shadow_backends.sql:ShadowUsers keystone.policy = rules = keystone.policy.backends.rules:Policy sql = keystone.policy.backends.sql:Policy keystone.resource = sql = keystone.resource.backends.sql:Resource keystone.resource.domain_config = sql = keystone.resource.config_backends.sql:DomainConfig keystone.role = sql = keystone.assignment.role_backends.sql:Role keystone.token.persistence = kvs = keystone.token.persistence.backends.kvs:Token memcache = keystone.token.persistence.backends.memcache:Token memcache_pool = keystone.token.persistence.backends.memcache_pool:Token sql = keystone.token.persistence.backends.sql:Token keystone.token.provider = fernet = keystone.token.providers.fernet:Provider uuid = 
keystone.token.providers.uuid:Provider pki = keystone.token.providers.pki:Provider pkiz = keystone.token.providers.pkiz:Provider keystone.trust = sql = keystone.trust.backends.sql:Trust keystone.endpoint_filter = sql = keystone.catalog.backends.sql:Catalog keystone.endpoint_policy = sql = keystone.endpoint_policy.backends.sql:EndpointPolicy keystone.federation = sql = keystone.federation.backends.sql:Federation keystone.oauth1 = sql = keystone.oauth1.backends.sql:OAuth1 keystone.revoke = sql = keystone.revoke.backends.sql:Revoke oslo.config.opts = keystone = keystone.common.config:list_opts keystone.notifications = keystone.notifications:list_opts oslo.config.opts.defaults = keystone = keystone.common.config:set_middleware_defaults paste.filter_factory = admin_token_auth = keystone.middleware:AdminTokenAuthMiddleware.factory build_auth_context = keystone.middleware:AuthContextMiddleware.factory crud_extension = keystone.contrib.admin_crud:CrudExtension.factory debug = oslo_middleware:Debug.factory endpoint_filter_extension = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory ec2_extension = keystone.contrib.ec2:Ec2Extension.factory ec2_extension_v3 = keystone.contrib.ec2:Ec2ExtensionV3.factory federation_extension = keystone.contrib.federation.routers:FederationExtension.factory json_body = keystone.middleware:JsonBodyMiddleware.factory oauth1_extension = keystone.contrib.oauth1.routers:OAuth1Extension.factory request_id = oslo_middleware:RequestId.factory revoke_extension = keystone.contrib.revoke.routers:RevokeExtension.factory s3_extension = keystone.contrib.s3:S3Extension.factory simple_cert_extension = keystone.contrib.simple_cert:SimpleCertExtension.factory sizelimit = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory token_auth = keystone.middleware:TokenAuthMiddleware.factory url_normalize = keystone.middleware:NormalizingFilter.factory user_crud_extension = keystone.contrib.user_crud:CrudExtension.factory paste.app_factory = 
admin_service = keystone.version.service:admin_app_factory admin_version_service = keystone.version.service:admin_version_app_factory public_service = keystone.version.service:public_app_factory public_version_service = keystone.version.service:public_version_app_factory service_v3 = keystone.version.service:v3_app_factory keystone-9.0.0/tools/0000775000567000056710000000000012701407246015665 5ustar jenkinsjenkins00000000000000keystone-9.0.0/tools/pretty_tox.sh0000775000567000056710000000050612701407102020435 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash set -o pipefail TESTRARGS=$1 python setup.py testr --testr-args="--subunit $TESTRARGS" | subunit-trace -f retval=$? # NOTE(mtreinish) The pipe above would eat the slowest display from pbr's testr # wrapper so just manually print the slowest tests. echo -e "\nSlowest Tests:\n" testr slowest exit $retval keystone-9.0.0/tools/pretty_tox_py3.sh0000775000567000056710000000064712701407102021236 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash set -o pipefail TESTRARGS=`python -c 'print ("^((?!%s).)*$" % "|".join(f.strip() for f in open("tests-py3-blacklist.txt")))'` python setup.py testr --testr-args="--subunit $TESTRARGS" | subunit-trace -f retval=$? # NOTE(mtreinish) The pipe above would eat the slowest display from pbr's testr # wrapper so just manually print the slowest tests. echo -e "\nSlowest Tests:\n" testr slowest exit $retval keystone-9.0.0/tools/sample_data.sh0000775000567000056710000002256712701407105020504 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Sample initial data for Keystone using python-openstackclient # # This script is based on the original DevStack keystone_data.sh script. # # It demonstrates how to bootstrap Keystone with an administrative user # using the OS_TOKEN and OS_URL environment variables and the administrative # API. It will get the admin_token (OS_TOKEN) and admin_port from # keystone.conf if available. # # Disable creation of endpoints by setting DISABLE_ENDPOINTS environment variable. # Use this with the Catalog Templated backend. # # A EC2-compatible credential is created for the admin user and # placed in etc/ec2rc. # # Tenant User Roles # ------------------------------------------------------- # demo admin admin # service glance service # service nova service # service ec2 service # service swift service # service neutron service # By default, passwords used are those in the OpenStack Install and Deploy Manual. # One can override these (publicly known, and hence, insecure) passwords by setting the appropriate # environment variables. A common default password for all the services can be used by # setting the "SERVICE_PASSWORD" environment variable. # Test to verify that the openstackclient is installed, if not exit type openstack >/dev/null 2>&1 || { echo >&2 "openstackclient is not installed. Please install it to use this script. Aborting." 
exit 1 } ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} NOVA_PASSWORD=${NOVA_PASSWORD:-${SERVICE_PASSWORD:-nova}} GLANCE_PASSWORD=${GLANCE_PASSWORD:-${SERVICE_PASSWORD:-glance}} EC2_PASSWORD=${EC2_PASSWORD:-${SERVICE_PASSWORD:-ec2}} SWIFT_PASSWORD=${SWIFT_PASSWORD:-${SERVICE_PASSWORD:-swiftpass}} NEUTRON_PASSWORD=${NEUTRON_PASSWORD:-${SERVICE_PASSWORD:-neutron}} CONTROLLER_PUBLIC_ADDRESS=${CONTROLLER_PUBLIC_ADDRESS:-localhost} CONTROLLER_ADMIN_ADDRESS=${CONTROLLER_ADMIN_ADDRESS:-localhost} CONTROLLER_INTERNAL_ADDRESS=${CONTROLLER_INTERNAL_ADDRESS:-localhost} TOOLS_DIR=$(cd $(dirname "$0") && pwd) KEYSTONE_CONF=${KEYSTONE_CONF:-/etc/keystone/keystone.conf} if [[ -r "$KEYSTONE_CONF" ]]; then EC2RC="$(dirname "$KEYSTONE_CONF")/ec2rc" elif [[ -r "$TOOLS_DIR/../etc/keystone.conf" ]]; then # assume git checkout KEYSTONE_CONF="$TOOLS_DIR/../etc/keystone.conf" EC2RC="$TOOLS_DIR/../etc/ec2rc" else KEYSTONE_CONF="" EC2RC="ec2rc" fi # Extract some info from Keystone's configuration file if [[ -r "$KEYSTONE_CONF" ]]; then CONFIG_SERVICE_TOKEN=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^admin_token= | cut -d'=' -f2) if [[ -z "${CONFIG_SERVICE_TOKEN}" ]]; then # default config options are commented out, so lets try those CONFIG_SERVICE_TOKEN=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^\#admin_token= | cut -d'=' -f2) fi CONFIG_ADMIN_PORT=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^admin_port= | cut -d'=' -f2) if [[ -z "${CONFIG_ADMIN_PORT}" ]]; then # default config options are commented out, so lets try those CONFIG_ADMIN_PORT=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^\#admin_port= | cut -d'=' -f2) fi fi export OS_TOKEN=${OS_TOKEN:-$CONFIG_SERVICE_TOKEN} if [[ -z "$OS_TOKEN" ]]; then echo "No service token found." echo "Set OS_TOKEN manually from keystone.conf admin_token." 
exit 1 fi export OS_URL=${OS_URL:-http://$CONTROLLER_PUBLIC_ADDRESS:${CONFIG_ADMIN_PORT:-35357}/v2.0} function get_id () { echo `"$@" | grep ' id ' | awk '{print $4}'` } # # Roles # openstack role create admin openstack role create service # # Default tenant # openstack project create demo \ --description "Default Tenant" openstack user create admin --project demo \ --password "${ADMIN_PASSWORD}" openstack role add --user admin \ --project demo\ admin # # Service tenant # openstack project create service \ --description "Service Tenant" openstack user create glance --project service\ --password "${GLANCE_PASSWORD}" openstack role add --user glance \ --project service \ service openstack user create nova --project service\ --password "${NOVA_PASSWORD}" openstack role add --user nova \ --project service \ service openstack user create ec2 --project service \ --password "${EC2_PASSWORD}" openstack role add --user ec2 \ --project service \ service openstack user create swift --project service \ --password "${SWIFT_PASSWORD}" \ openstack role add --user swift \ --project service \ service openstack user create neutron --project service \ --password "${NEUTRON_PASSWORD}" \ openstack role add --user neutron \ --project service \ service # # Keystone service # openstack service create --name keystone \ --description "Keystone Identity Service" \ identity if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:\$(public_port)s/v2.0" \ --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:\$(admin_port)s/v2.0" \ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:\$(public_port)s/v2.0" \ keystone fi # # Nova service # openstack service create --name=nova \ --description="Nova Compute Service" \ compute if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8774/v2/\$(tenant_id)s" \ --adminurl 
"http://$CONTROLLER_ADMIN_ADDRESS:8774/v2/\$(tenant_id)s" \ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8774/v2/\$(tenant_id)s" \ nova fi # # Volume service # openstack service create --name=volume \ --description="Cinder Volume Service" \ volume if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8776/v1/\$(tenant_id)s" \ --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8776/v1/\$(tenant_id)s" \ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8776/v1/\$(tenant_id)s" \ volume fi # # Image service # openstack service create --name=glance \ --description="Glance Image Service" \ image if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:9292" \ --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:9292" \ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:9292" \ glance fi # # EC2 service # openstack service create --name=ec2 \ --description="EC2 Compatibility Layer" \ ec2 if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8773/services/Cloud" \ --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8773/services/Admin" \ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8773/services/Cloud" \ ec2 fi # # Swift service # openstack service create --name=swift \ --description="Swift Object Storage Service" \ object-store if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8080/v1/AUTH_\$(tenant_id)s" \ --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8080/v1" \ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8080/v1/AUTH_\$(tenant_id)s" \ swift fi # # Neutron service # openstack service create --name=neutron \ --description="Neutron Network Service" \ network if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ --publicurl 
"http://$CONTROLLER_PUBLIC_ADDRESS:9696" \ --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:9696" \ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:9696" \ neutron fi # create ec2 creds and parse the secret and access key returned ADMIN_USER=$(get_id openstack user show admin) RESULT=$(openstack ec2 credentials create --project service --user $ADMIN_USER) ADMIN_ACCESS=`echo "$RESULT" | grep access | awk '{print $4}'` ADMIN_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'` # write the secret and access to ec2rc cat > $EC2RC <`_. This is usually the best place to ask questions and find your way around. IRC stands for Internet Relay Chat and it is a way to chat online in real time. You can also ask a question and come back to the log files to read the answer later. Logs for the #openstack IRC channels are stored at ``_. For more information regarding OpenStack IRC channels please visit the `OpenStack IRC Wiki `_. OpenStack Wiki -------------- The wiki is a living source of knowledge. It is edited by the community, and has collections of links and other sources of information. Typically the pages are a good place to write drafts for specs or documentation, describe a blueprint, or collaborate with others. `OpenStack Wiki `_ * `useful Keystone project links `_ Keystone on Launchpad --------------------- Launchpad is a code hosting that OpenStack is using to track bugs, feature work, and releases of OpenStack. Like other OpenStack projects, Keystone source code is hosted on git.openstack.org * `Keystone Project Page on Launchpad `_ * `Keystone Source Repository `_ Within launchpad, we use `blueprints `_, to track feature work, and track `bugs `_ as well. If you are looking for a place to get started contributing to keystone, please look at any bugs for Keystone that are tagged as `low-hanging-fruit `_. 
OpenStack Blog -------------- The OpenStack blog includes a weekly newsletter that aggregates OpenStack news from around the internet, as well as providing inside information on upcoming events and posts from OpenStack contributors. `OpenStack Blog `_ See also: `Planet OpenStack `_, an aggregation of blogs about OpenStack from around the internet, combined into a web site and RSS feed. If you'd like to contribute with your blog posts, there are instructions for `adding your blog `_. Twitter ------- Because all the cool kids do it: `@openstack `_. Also follow the `#openstack `_ tag for relevant tweets. keystone-9.0.0/doc/source/configure_federation.rst0000664000567000056710000003444312701407102023504 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================================== Configuring Keystone for Federation =================================== ----------- Definitions ----------- * `Service Provider (SP)`: provides a service to an end-user. * `Identity Provider (IdP)`: service that stores information about users and groups. * `SAML assertion`: contains information about a user as provided by an IdP. ----------------------------------- Keystone as a Service Provider (SP) ----------------------------------- .. NOTE:: This feature is considered stable and supported as of the Juno release. 
Prerequisites ------------- This approach to federation supports keystone as a Service Provider, consuming identity properties issued by an external Identity Provider, such as SAML assertions or OpenID Connect claims. Federated users are not mirrored in the keystone identity backend (for example, using the SQL driver). The external Identity Provider is responsible for authenticating users, and communicates the result of authentication to keystone using identity properties. Keystone maps these values to keystone user groups and assignments created in keystone. The following configuration steps were performed on a machine running Ubuntu 12.04 and Apache 2.2.22. To enable federation, you'll need to: 1. Run keystone under Apache, rather than using ``keystone-all``. 2. Configure Apache to use a federation capable authentication method. 3. Configure ``federation`` in keystone. Configure Apache to use a federation capable authentication method ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is currently support for two major federation protocols: * SAML - Keystone supports the following implementations: * Shibboleth - see `Setup Shibboleth`_. * Mellon - see `Setup Mellon`_. * OpenID Connect - see `Setup OpenID Connect`_. .. _`Setup Shibboleth`: federation/shibboleth.html .. _`Setup OpenID Connect`: federation/openidc.html .. _`Setup Mellon`: federation/mellon.html Configure keystone and Horizon for Single Sign-On ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * To configure horizon to access a federated keystone, follow the steps outlined at: `Keystone Federation and Horizon`_. .. _`Keystone Federation and Horizon`: federation/websso.html Configuring Federation in Keystone ----------------------------------- Now that the Identity Provider and keystone are communicating we can start to configure ``federation``. 1. Configure authentication drivers in ``keystone.conf`` 2. Add local keystone groups and roles 3. 
Add Identity Provider(s), Mapping(s), and Protocol(s) Configure authentication drivers in ``keystone.conf`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. NOTE:: ``saml2`` has been deprecated as of the Mitaka release. Support for the ``saml2`` wrapper will be removed as of the "O" release. The recommended authentication method is ``mapped``, which supports ``saml2``. Add the authentication methods to the ``[auth]`` section in ``keystone.conf``. Names should be equal to protocol names added via Identity API v3. Here we use examples ``mapped`` and ``openid``. .. code-block:: bash [auth] methods = external,password,token,mapped,openid Create keystone groups and assign roles ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As mentioned earlier, no new users will be added to the Identity backend, but the Identity Service requires group-based role assignments to authorize federated users. The federation mapping function will map the user into local Identity Service groups objects, and hence to local role assignments. Thus, it is required to create the necessary Identity Service groups that correspond to the Identity Provider's groups; additionally, these groups should be assigned roles on one or more projects or domains. You may be interested in more information on `group management `_ and `role assignments `_, both of which are exposed to the CLI via `python-openstackclient `_. Add Identity Provider(s), Mapping(s), and Protocol(s) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To utilize federation the following must be created in the Identity Service: * Identity Provider * Mapping * Protocol More information on ``federation in keystone`` can be found `here `__. ~~~~~~~~~~~~~~~~~ Identity Provider ~~~~~~~~~~~~~~~~~ Create an Identity Provider object in keystone, which represents the Identity Provider we will use to authenticate end users. More information on identity providers can be found `here `__. ~~~~~~~ Mapping ~~~~~~~ A mapping is a list of rules. 
The only Identity API objects that will support mapping are groups and users. Mapping adds a set of rules to map federation protocol attributes to Identity API objects. There are many different ways to setup as well as combine these rules. More information on rules can be found on the :doc:`mapping_combinations` page. An Identity Provider has exactly one mapping specified per protocol. Mapping objects can be used multiple times by different combinations of Identity Provider and Protocol. More information on mapping can be found `here `__. ~~~~~~~~ Protocol ~~~~~~~~ A protocol contains information that dictates which Mapping rules to use for an incoming request made by an IdP. An IdP may have multiple supported protocols. Add `Protocol object `__ and specify the mapping id you want to use with the combination of the IdP and Protocol. Performing federated authentication ----------------------------------- 1. Authenticate externally and generate an unscoped token in keystone 2. Determine accessible resources 3. Get a scoped token Get an unscoped token ~~~~~~~~~~~~~~~~~~~~~ Unlike other authentication methods in the Identity Service, the user does not issue an HTTP POST request with authentication data in the request body. To start federated authentication a user must access the dedicated URL with Identity Provider's and Protocol's identifiers stored within a protected URL. The URL has a format of: ``/v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}/auth``. In this instance we follow a standard SAML2 authentication procedure, that is, the user will be redirected to the Identity Provider's authentication webpage and be prompted for credentials. After successfully authenticating the user will be redirected to the Service Provider's endpoint. If using a web browser, a token will be returned in XML format. In the returned unscoped token, a list of Identity Service groups the user belongs to will be included. 
More information on getting an unscoped token can be found `here `__. ~~~~~~~~~~~~ Example cURL ~~~~~~~~~~~~ Note that the request does not include a body. The following url would be considered protected by ``mod_shib`` and Apache, as such a request made to the URL would be redirected to the Identity Provider, to start the SAML authentication procedure. .. code-block:: bash $ curl -X GET -D - http://localhost:5000/v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}/auth Determine accessible resources ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By using the previously returned token, the user can issue requests to the list projects and domains that are accessible. * List projects a federated user can access: ``GET /OS-FEDERATION/projects`` * List domains a federated user can access: ``GET /OS-FEDERATION/domains`` More information on listing resources can be found `here `__. ~~~~~~~~~~~~ Example cURL ~~~~~~~~~~~~ .. code-block:: bash $ curl -X GET -H "X-Auth-Token: " http://localhost:5000/v3/OS-FEDERATION/projects or .. code-block:: bash $ curl -X GET -H "X-Auth-Token: " http://localhost:5000/v3/OS-FEDERATION/domains Get a scoped token ~~~~~~~~~~~~~~~~~~ A federated user may request a scoped token, by using the unscoped token. A project or domain may be specified by either ``id`` or ``name``. An ``id`` is sufficient to uniquely identify a project or domain. More information on getting a scoped token can be found `here `__. ~~~~~~~~~~~~ Example cURL ~~~~~~~~~~~~ .. code-block:: bash $ curl -X POST -H "Content-Type: application/json" -d '{"auth":{"identity":{"methods":["mapped"],"saml2":{"id":""}},"scope":{"project":{"domain": {"name": "Default"},"name":"service"}}}}' -D - http://localhost:5000/v3/auth/tokens -------------------------------------- Keystone as an Identity Provider (IdP) -------------------------------------- .. NOTE:: This feature is experimental and unsupported in Juno (with several issues that will not be backported). 
These issues have been fixed and this feature is considered stable and supported as of the Kilo release. .. NOTE:: This feature requires installation of the xmlsec1 tool via your distribution packaging system (for instance apt or yum) Example for apt: .. code-block:: bash $ apt-get install xmlsec1 Configuration Options --------------------- There are certain settings in ``keystone.conf`` that must be setup, prior to attempting to federate multiple keystone deployments. Within ``keystone.conf``, assign values to the ``[saml]`` related fields, for example: .. code-block:: ini [saml] certfile=/etc/keystone/ssl/certs/ca.pem keyfile=/etc/keystone/ssl/private/cakey.pem idp_entity_id=https://keystone.example.com/v3/OS-FEDERATION/saml2/idp idp_sso_endpoint=https://keystone.example.com/v3/OS-FEDERATION/saml2/sso idp_metadata_path=/etc/keystone/saml2_idp_metadata.xml Though not necessary, the follow Organization configuration options should also be setup. It is recommended that these values be URL safe. .. code-block:: ini idp_organization_name=example_company idp_organization_display_name=Example Corp. idp_organization_url=example.com As with the Organization options, the Contact options, are not necessary, but it's advisable to set these values too. .. code-block:: ini idp_contact_company=example_company idp_contact_name=John idp_contact_surname=Smith idp_contact_email=jsmith@example.com idp_contact_telephone=555-55-5555 idp_contact_type=technical Generate Metadata ----------------- In order to create a trust between the IdP and SP, metadata must be exchanged. To create metadata for your keystone IdP, run the ``keystone-manage`` command and pipe the output to a file. For example: .. code-block:: bash $ keystone-manage saml_idp_metadata > /etc/keystone/saml2_idp_metadata.xml .. NOTE:: The file location should match the value of the configuration option ``idp_metadata_path`` that was assigned in the previous section. 
Create a Service Provider (SP) ------------------------------ In this example we are creating a new Service Provider with an ID of ``BETA``, a ``sp_url`` of ``http://beta.example.com/Shibboleth.sso/SAML2/ECP`` and a ``auth_url`` of ``http://beta.example.com:5000/v3/OS-FEDERATION/identity_providers/beta/protocols/saml2/auth`` . The ``sp_url`` will be used when creating a SAML assertion for ``BETA`` and signed by the current keystone IdP. The ``auth_url`` is used to retrieve the token for ``BETA`` once the SAML assertion is sent. Although the ``enabled`` field is optional we are passing it set to ``true`` otherwise it will be set to ``false`` by default. .. code-block:: bash $ curl -s -X PUT \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d '{"service_provider": {"auth_url": "http://beta.example.com:5000/v3/OS-FEDERATION/identity_providers/beta/protocols/saml2/auth", "sp_url": "https://example.com:5000/Shibboleth.sso/SAML2/ECP", "enabled": true}}' \ http://localhost:5000/v3/OS-FEDERATION/service_providers/BETA | python -mjson.tool Testing it all out ------------------ Lastly, if a scoped token and a Service Provider scope are presented to the local keystone, the result will be a full ECP wrapped SAML Assertion, specifically intended for the Service Provider keystone. .. NOTE:: ECP stands for Enhanced Client or Proxy, an extension from the SAML2 protocol used in non-browser interfaces, like in the following example with cURL. .. code-block:: bash $ curl -s -X POST \ -H "Content-Type: application/json" \ -d '{"auth": {"scope": {"service_provider": {"id": "BETA"}}, "identity": {"token": {"id": "d793d935b9c343f783955cf39ee7dc3c"}, "methods": ["token"]}}}' \ http://localhost:5000/v3/auth/OS-FEDERATION/saml2/ecp .. NOTE:: Use URL http://localhost:5000/v3/auth/OS-FEDERATION/saml2 to request for pure SAML Assertions. 
At this point the ECP wrapped SAML Assertion can be sent to the Service Provider keystone using the provided ``auth_url`` in the ``X-Auth-Url`` header present in the response containing the Assertion, and a valid OpenStack token, issued by a Service Provider keystone, will be returned. keystone-9.0.0/doc/source/configuration.rst0000664000567000056710000022766312701407102022202 0ustar jenkinsjenkins00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================== Configuring Keystone ==================== .. toctree:: :maxdepth: 1 man/keystone-manage man/keystone-all Once Keystone is installed, it is configured via a primary configuration file (``etc/keystone.conf``), a PasteDeploy configuration file (``etc/keystone-paste.ini``), possibly a separate logging configuration file, and initializing data into Keystone using the command line client. By default, Keystone starts a service on `IANA-assigned port 35357 `_. This may overlap with your system's ephemeral port range, so another process may already be using this port without being explicitly configured to do so. To prevent this scenario from occurring, it's recommended that you explicitly exclude port 35357 from the available ephemeral port range. On a Linux system, this would be accomplished by: .. 
code-block:: bash $ sysctl -w 'net.ipv4.ip_local_reserved_ports=35357' To make the above change persistent, ``net.ipv4.ip_local_reserved_ports = 35357`` should be added to ``/etc/sysctl.conf`` or to ``/etc/sysctl.d/keystone.conf``. Starting and Stopping Keystone under Eventlet ============================================= .. WARNING:: Running keystone under eventlet has been deprecated as of the Kilo release. Support for utilizing eventlet will be removed as of the M-release. The recommended deployment is to run keystone in a WSGI server such as Apache httpd with ``mod_wsgi``. Keystone can be run using either its built-in eventlet server or it can be run embedded in a web server. While the eventlet server is convenient and easy to use, it's lacking in security features that have been developed into Internet- based web servers over the years. As such, running the eventlet server as described in this section is not recommended. Start Keystone services using the command: .. code-block:: bash $ keystone-all Invoking this command starts up two ``wsgi.Server`` instances, ``admin`` (the administration API) and ``main`` (the primary/public API interface). Both services are configured to run in a single process. .. NOTE:: The separation into ``admin`` and ``main`` interfaces is a historical anomaly. The new V3 API provides the same interface on both the admin and main interfaces (this can be configured in ``keystone-paste.ini``, but the default is to have both the same). The V2.0 API provides a limited public API (getting and validating tokens) on ``main``, and an administrative API (which can include creating users and such) on the ``admin`` interface. Stop the process using ``Control-C``. .. NOTE:: If you have not already configured Keystone, it may not start as expected. Configuration Files =================== The Keystone configuration files are an ``ini`` file format based on Paste_, a common system used to configure Python WSGI based applications. 
The PasteDeploy configuration entries (WSGI pipeline definitions) can be provided in a separate ``keystone-paste.ini`` file, while general and driver-specific configuration parameters are in the primary configuration file ``keystone.conf``. .. NOTE:: Since keystone's PasteDeploy configuration file has been separated from the main keystone configuration file, ``keystone.conf``, all local configuration or driver-specific configuration parameters must go in the main keystone configuration file instead of the PasteDeploy configuration file, i.e. configuration in ``keystone-paste.ini`` is not supported. The primary configuration file is organized into the following sections: * ``[DEFAULT]`` - General configuration * ``[assignment]`` - Assignment system driver configuration * ``[auth]`` - Authentication plugin configuration * ``[cache]`` - Caching layer configuration * ``[catalog]`` - Service catalog driver configuration * ``[credential]`` - Credential system driver configuration * ``[endpoint_filter]`` - Endpoint filtering configuration * ``[endpoint_policy]`` - Endpoint policy configuration * ``[eventlet_server]`` - Eventlet server configuration * ``[eventlet_server_ssl]`` - Eventlet server SSL configuration * ``[federation]`` - Federation driver configuration * ``[identity]`` - Identity system driver configuration * ``[identity_mapping]`` - Identity mapping system driver configuration * ``[kvs]`` - KVS storage backend configuration * ``[ldap]`` - LDAP configuration options * ``[memcache]`` - Memcache configuration options * ``[oauth1]`` - OAuth 1.0a system driver configuration * ``[os_inherit]`` - Inherited role assignment configuration * ``[paste_deploy]`` - Pointer to the PasteDeploy configuration file * ``[policy]`` - Policy system driver configuration for RBAC * ``[resource]`` - Resource system driver configuration * ``[revoke]`` - Revocation system driver configuration * ``[role]`` - Role system driver configuration * ``[saml]`` - SAML configuration options * 
``[signing]`` - Cryptographic signatures for PKI based tokens * ``[ssl]`` - SSL certificate generation configuration * ``[token]`` - Token driver & token provider configuration * ``[trust]`` - Trust configuration The Keystone primary configuration file is expected to be named ``keystone.conf``. When starting Keystone, you can specify a different configuration file to use with ``--config-file``. If you do **not** specify a configuration file, Keystone will look in the following directories for a configuration file, in order: * ``~/.keystone/`` * ``~/`` * ``/etc/keystone/`` * ``/etc/`` PasteDeploy configuration file is specified by the ``config_file`` parameter in ``[paste_deploy]`` section of the primary configuration file. If the parameter is not an absolute path, then Keystone looks for it in the same directories as above. If not specified, WSGI pipeline definitions are loaded from the primary configuration file. Domain-specific Drivers ----------------------- Keystone supports the option (disabled by default) to specify identity driver configurations on a domain by domain basis, allowing, for example, a specific domain to have its own LDAP or SQL server. This is configured by specifying the following options: .. code-block:: ini [identity] domain_specific_drivers_enabled = True domain_config_dir = /etc/keystone/domains Setting ``domain_specific_drivers_enabled`` to ``True`` will enable this feature, causing Keystone to look in the ``domain_config_dir`` for config files of the form:: keystone..conf Options given in the domain specific configuration file will override those in the primary configuration file for the specified domain only. Domains without a specific configuration file will continue to use the options from the primary configuration file. Keystone also supports the ability to store the domain-specific configuration options in the keystone SQL database, managed via the Identity API, as opposed to using domain-specific configuration files. .. 
NOTE:: The ability to store and manage configuration options via the Identity API is new and experimental in Kilo. This capability (which is disabled by default) is enabled by specifying the following options in the main keystone configuration file: .. code-block:: ini [identity] domain_specific_drivers_enabled = true domain_configurations_from_database = true Once enabled, any existing domain-specific configuration files in the configuration directory will be ignored and only those domain-specific configuration options specified via the Identity API will be used. Unlike the file-based method of specifying domain-specific configurations, options specified via the Identity API will become active without needing to restart the keystone server. For performance reasons, the current state of configuration options for a domain are cached in the keystone server, and in multi-process and multi-threaded keystone configurations, the new configuration options may not become active until the cache has timed out. The cache settings for domain config options can be adjusted in the general keystone configuration file (option ``cache_time`` in the ``domain_config`` group). .. NOTE:: It is important to notice that when using either of these methods of specifying domain-specific configuration options, the main keystone configuration file is still maintained. Only those options that relate to the Identity driver for users and groups (i.e. specifying whether the driver for this domain is SQL or LDAP, and, if LDAP, the options that define that connection) are supported in a domain-specific manner. Further, when using the configuration options via the Identity API, the driver option must be set to an LDAP driver (attempting to set it to an SQL driver will generate an error when it is subsequently used). 
For existing installations that already use file-based domain-specific configurations who wish to migrate to the SQL-based approach, the ``keystone-manage`` command can be used to upload all configuration files to the SQL database: .. code-block:: bash $ keystone-manage domain_config_upload --all Once uploaded, these domain-configuration options will be visible via the Identity API as well as applied to the domain-specific drivers. It is also possible to upload individual domain-specific configuration files by specifying the domain name: .. code-block:: bash $ keystone-manage domain_config_upload --domain-name DOMAINA .. NOTE:: It is important to notice that by enabling either of the domain-specific configuration methods, the operations of listing all users and listing all groups are not supported, those calls will need either a domain filter to be specified or usage of a domain scoped token. .. NOTE:: Keystone does not support moving the contents of a domain (i.e. "its" users and groups) from one backend to another, nor group membership across backend boundaries. .. NOTE:: When using the file-based domain-specific configuration method, to delete a domain that uses a domain specific backend, it's necessary to first disable it, remove its specific configuration file (i.e. its corresponding keystone..conf) and then restart the Identity server. When managing configuration options via the Identity API, the domain can simply be disabled and deleted via the Identity API; since any domain-specific configuration options will automatically be removed. .. NOTE:: Although Keystone supports multiple LDAP backends via the above domain-specific configuration methods, it currently only supports one SQL backend. This could be either the default driver or a single domain-specific backend, perhaps for storing service users in a predominantly LDAP installation. 
Due to the need for user and group IDs to be unique across an OpenStack installation and for Keystone to be able to deduce which domain and backend to use from just a user or group ID, it dynamically builds a persistent identity mapping table from a public ID to the actual domain, local ID (within that backend) and entity type. The public ID is automatically generated by Keystone when it first encounters the entity. If the local ID of the entity is from a backend that does not guarantee to generate UUIDs, a hash algorithm will generate a public ID for that entity, which is what will be exposed by Keystone. The use of a hash will ensure that if the public ID needs to be regenerated then the same public ID will be created. This is useful if you are running multiple keystones and want to ensure the same ID would be generated whichever server you hit. While Keystone will dynamically maintain the identity mapping, including removing entries when entities are deleted via the Keystone, for those entities in backends that are managed outside of Keystone (e.g. a Read Only LDAP), Keystone will not know if entities have been deleted and hence will continue to carry stale identity mappings in its table. While benign, keystone provides an ability for operators to purge the mapping table of such stale entries using the keystone-manage command, for example: .. code-block:: bash $ keystone-manage mapping_purge --domain-name DOMAINA --local-id abc@de.com A typical usage would be for an operator to obtain a list of those entries in an external backend that had been deleted out-of-band to Keystone, and then call keystone-manage to purge those entries by specifying the domain and local-id. The type of the entity (i.e. user or group) may also be specified if this is needed to uniquely identify the mapping. 
Since public IDs can be regenerated **with the correct generator implementation**, if the details of those entries that have been deleted are not available, then it is safe to simply bulk purge identity mappings periodically, for example: .. code-block:: bash $ keystone-manage mapping_purge --domain-name DOMAINA will purge all the mappings for DOMAINA. The entire mapping table can be purged with the following command: .. code-block:: bash $ keystone-manage mapping_purge --all Public ID Generators -------------------- Keystone supports a customizable public ID generator and it is specified in the ``[identity_mapping]`` section of the configuration file. Keystone provides a sha256 generator as default, which produces regeneratable public IDs. The generator algorithm for public IDs is a balance between key size (i.e. the length of the public ID), the probability of collision and, in some circumstances, the security of the public ID. The maximum length of public ID supported by Keystone is 64 characters, and the default generator (sha256) uses this full capability. Since the public ID is what is exposed externally by Keystone and potentially stored in external systems, some installations may wish to make use of other generator algorithms that have a different trade-off of attributes. A different generator can be installed by configuring the following property: * ``generator`` - identity mapping generator. Defaults to ``sha256`` (implemented by :class:`keystone.identity.id_generators.sha256.Generator`) .. WARNING:: Changing the generator may cause all existing public IDs to become invalid, so typically the generator selection should be considered immutable for a given installation. Authentication Plugins ---------------------- .. NOTE:: This feature is only supported by Keystone for the Identity API v3 clients. Keystone supports authentication plugins and they are specified in the ``[auth]`` section of the configuration file. 
However, an authentication plugin may also have its own section in the configuration file. It is up to the plugin to register its own configuration options. * ``methods`` - comma-delimited list of authentication plugin names * ```` - specify the class which handles the authentication method, in the same manner as one would specify a backend driver. Keystone provides three authentication methods by default. ``password`` handles password authentication and ``token`` handles token authentication. ``external`` is used in conjunction with authentication performed by a container web server that sets the ``REMOTE_USER`` environment variable. For more details, refer to :doc:`External Authentication `. How to Implement an Authentication Plugin ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ All authentication plugins must extend the :class:`keystone.auth.core.AuthMethodHandler` class and implement the ``authenticate()`` method. The ``authenticate()`` method expects the following parameters. * ``context`` - Keystone's request context * ``auth_payload`` - the content of the authentication for a given method * ``auth_context`` - user authentication context, a dictionary shared by all plugins. It contains ``method_names`` and ``extras`` by default. ``method_names`` is a list and ``extras`` is a dictionary. If successful, the ``authenticate()`` method must provide a valid ``user_id`` in ``auth_context`` and return ``None``. ``method_name`` is used to convey any additional authentication methods in case authentication is for re-scoping. For example, if the authentication is for re-scoping, a plugin must append the previous method names into ``method_names``. Also, a plugin may add any additional information into ``extras``. Anything in ``extras`` will be conveyed in the token's ``extras`` field. If authentication requires multiple steps, the ``authenticate()`` method must return the payload in the form of a dictionary for the next authentication step. 
If authentication is unsuccessful, the ``authenticate()`` method must raise a :class:`keystone.exception.Unauthorized` exception. Simply add the new plugin name to the ``methods`` list along with your plugin class configuration in the ``[auth]`` section of the configuration file to deploy it. If the plugin requires additional configurations, it may register its own section in the configuration file. Plugins are invoked in the order in which they are specified in the ``methods`` attribute of the ``authentication`` request body. If multiple plugins are invoked, all plugins must succeed in order for the entire authentication to be successful. Furthermore, all the plugins invoked must agree on the ``user_id`` in the ``auth_context``. The ``REMOTE_USER`` environment variable is only set from a containing webserver. However, to ensure that a user must go through other authentication mechanisms, even if this variable is set, remove ``external`` from the list of plugins specified in ``methods``. This effectively disables external authentication. For more details, refer to :doc:`External Authentication `. Token Persistence Driver ------------------------ Keystone supports customizable token persistence drivers. These can be specified in the ``[token]`` section of the configuration file. Keystone provides three non-test persistence backends. These can be set with the ``[token] driver`` configuration option. The drivers Keystone provides are: * ``memcache_pool`` - The pooled memcached token persistence engine. This backend supports the concept of pooled memcache client object (allowing for the re-use of the client objects). This backend has a number of extra tunable options in the ``[memcache]`` section of the config. Implemented by :class:`keystone.token.persistence.backends.memcache_pool.Token` * ``sql`` - The SQL-based (default) token persistence engine. 
Implemented by :class:`keystone.token.persistence.backends.sql.Token` * ``memcache`` - The memcached based token persistence backend. This backend relies on ``dogpile.cache`` and stores the token data in a set of memcached servers. The servers URLs are specified in the ``[memcache] servers`` configuration option in the Keystone config. Implemented by :class:`keystone.token.persistence.backends.memcache.Token` .. WARNING:: It is recommended you use the ``memcache_pool`` backend instead of ``memcache`` as the token persistence driver if you are deploying Keystone under eventlet instead of Apache httpd with ``mod_wsgi``. This recommendation is due to known issues with the use of ``thread.local`` under eventlet that can allow the leaking of memcache client objects and consumption of extra sockets. Token Provider -------------- Keystone supports customizable token provider and it is specified in the ``[token]`` section of the configuration file. Keystone provides both UUID and PKI token providers. However, users may register their own token provider by configuring the following property. * ``provider`` - token provider driver. Defaults to ``uuid``. Implemented by :class:`keystone.token.providers.uuid.Provider` UUID, PKI, PKIZ, or Fernet? ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Each token format uses different technologies to achieve various performance, scaling and architectural requirements. UUID tokens contain randomly generated UUID4 payloads that are issued and validated by the identity service. They are encoded using their hex digest for transport and are thus URL-friendly. They must be persisted by the identity service in order to be later validated. Revoking them is simply a matter of deleting them from the token persistence backend. Both PKI and PKIZ tokens contain JSON payloads that represent the entire token validation response that would normally be retrieved from keystone. The payload is then signed using `Cryptographic Message Syntax (CMS) `_. 
The combination of CMS and the exhaustive payload allows PKI and PKIZ tokens to be verified offline using keystone's public signing key. The only reason for them to be persisted by the identity service is to later build token revocation *lists* (explicit lists of tokens that have been revoked), otherwise they are theoretically ephemeral when supported by token revocation *events* (which describe invalidated tokens rather than enumerate them). PKIZ tokens add zlib compression after signing to achieve a smaller overall token size. To make them URL-friendly, PKI tokens are base64 encoded and then arbitrarily manipulated to replace unsafe characters with safe ones whereas PKIZ tokens use conventional base64url encoding. Due to the size of the payload and the overhead incurred by the CMS format, both PKI and PKIZ tokens may be too long to fit in either headers or URLs if they contain extensive service catalogs or other additional attributes. Some third-party applications such as web servers and clients may need to be recompiled from source to customize the limitations that PKI and PKIZ tokens would otherwise exceed. Both PKI and PKIZ tokens require signing certificates which may be created using ``keystone-manage pki_setup`` for demonstration purposes (this is not recommended for production deployments: use certificates issued by a trusted CA instead). Fernet tokens contain a limited amount of identity and authorization data in a `MessagePacked `_ payload. The payload is then wrapped as a `Fernet `_ message for transport, where Fernet provides the required web safe characteristics for use in URLs and headers. Fernet tokens require symmetric encryption keys which can be established using ``keystone-manage fernet_setup`` and periodically rotated using ``keystone-manage fernet_rotate``. .. WARNING:: UUID, PKI, PKIZ, and Fernet tokens are all bearer tokens, meaning that they must be protected from unnecessary disclosure to prevent unauthorized access. 
Caching Layer ------------- Keystone supports a caching layer that is above the configurable subsystems (e.g. ``token``, ``identity``, etc). Keystone uses the `dogpile.cache`_ library which allows for flexible cache backends. The majority of the caching configuration options are set in the ``[cache]`` section. However, each section that has the capability to be cached usually has a ``caching`` boolean value that will toggle caching for that specific section. The current default behavior is that subsystem caching is enabled, but the global toggle is set to disabled. ``[cache]`` configuration section: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * ``enabled`` - enables/disables caching across all of keystone * ``debug_cache_backend`` - enables more in-depth logging from the cache backend (get, set, delete, etc) * ``backend`` - the caching backend module to use e.g. ``dogpile.cache.memcached`` .. NOTE:: A given ``backend`` must be registered with ``dogpile.cache`` before it can be used. The default backend is the ``Keystone`` no-op backend (``keystone.common.cache.noop``). If caching is desired a different backend will need to be specified. Current functional backends are: * ``dogpile.cache.memcached`` - Memcached backend using the standard `python-memcached`_ library (recommended for use with Apache httpd with ``mod_wsgi``) * ``dogpile.cache.pylibmc`` - Memcached backend using the `pylibmc`_ library * ``dogpile.cache.bmemcached`` - Memcached using `python-binary-memcached`_ library. * ``dogpile.cache.redis`` - `Redis`_ backend * ``dogpile.cache.dbm`` - local DBM file backend * ``dogpile.cache.memory`` - in-memory cache * ``keystone.cache.mongo`` - MongoDB as caching backend * ``keystone.cache.memcache_pool`` - An eventlet-safe implementation of ``dogpile.cache.memcached``. This implementation also provides client connection re-use. .. 
WARNING:: ``dogpile.cache.memory`` is not suitable for use outside of unit testing as it does not cleanup its internal cache on cache expiration, does not provide isolation to the cached data (values in the store can be inadvertently changed without extra layers of data protection added), and does not share cache between processes. This means that caching and cache invalidation will not be consistent or reliable when using ``Keystone`` and the ``dogpile.cache.memory`` backend under any real workload. .. WARNING:: Do not use ``dogpile.cache.memcached`` backend if you are deploying Keystone under eventlet. There are known issues with the use of ``thread.local`` under eventlet that can allow the leaking of memcache client objects and consumption of extra sockets. * ``expiration_time`` - int, the default length of time to cache a specific value. A value of ``0`` indicates to not cache anything. It is recommended that the ``enabled`` option be used to disable cache instead of setting this to ``0``. * ``backend_argument`` - an argument passed to the backend when instantiated ``backend_argument`` should be specified once per argument to be passed to the backend and in the format of ``:``. e.g.: ``backend_argument = host:localhost`` * ``proxies`` - comma delimited list of `ProxyBackends`_ e.g. ``my.example.Proxy, my.example.Proxy2`` Current Keystone systems that have caching capabilities: * ``token`` The token system has a separate ``cache_time`` configuration option, that can be set to a value above or below the global ``expiration_time`` default, allowing for different caching behavior from the other systems in ``Keystone``. This option is set in the ``[token]`` section of the configuration file. The Token Revocation List cache time is handled by the configuration option ``revocation_cache_time`` in the ``[token]`` section. The revocation list is refreshed whenever a token is revoked. 
It typically sees significantly more requests than specific token retrievals or token validation calls. * ``resource`` The resource system has a separate ``cache_time`` configuration option, that can be set to a value above or below the global ``expiration_time`` default, allowing for different caching behavior from the other systems in ``Keystone``. This option is set in the ``[resource]`` section of the configuration file. Currently ``resource`` has caching for ``project`` and ``domain`` specific requests (primarily around the CRUD actions). The ``list_projects`` and ``list_domains`` methods are not subject to caching. .. WARNING:: Be aware that if a read-only ``resource`` backend is in use, the cache will not immediately reflect changes on the back end. Any given change may take up to the ``cache_time`` (if set in the ``[resource]`` section of the configuration) or the global ``expiration_time`` (set in the ``[cache]`` section of the configuration) before it is reflected. If this type of delay (when using a read-only ``resource`` backend) is an issue, it is recommended that caching be disabled on ``resource``. To disable caching specifically on ``resource``, in the ``[resource]`` section of the configuration set ``caching`` to ``False``. * ``role`` Currently ``role`` has caching for ``get_role``, but not for ``list_roles``. The role system has a separate ``cache_time`` configuration option, that can be set to a value above or below the global ``expiration_time`` default, allowing for different caching behavior from the other systems in ``Keystone``. This option is set in the ``[role]`` section of the configuration file. .. WARNING:: Be aware that if a read-only ``role`` backend is in use, the cache will not immediately reflect changes on the back end. 
Any given change may take up to the ``cache_time`` (if set in the ``[role]`` section of the configuration) or the global ``expiration_time`` (set in the ``[cache]`` section of the configuration) before it is reflected. If this type of delay (when using a read-only ``role`` backend) is an issue, it is recommended that caching be disabled on ``role``. To disable caching specifically on ``role``, in the ``[role]`` section of the configuration set ``caching`` to ``False``. For more information about the different backends (and configuration options): * `dogpile.cache.backends.memory`_ * `dogpile.cache.backends.memcached`_ * `dogpile.cache.backends.redis`_ * `dogpile.cache.backends.file`_ * :py:mod:`keystone.common.cache.backends.mongo` .. _`dogpile.cache`: http://dogpilecache.readthedocs.org/en/latest/ .. _`python-memcached`: http://www.tummy.com/software/python-memcached/ .. _`pylibmc`: http://sendapatch.se/projects/pylibmc/index.html .. _`python-binary-memcached`: https://github.com/jaysonsantos/python-binary-memcached .. _`Redis`: http://redis.io/ .. _`dogpile.cache.backends.memory`: http://dogpilecache.readthedocs.org/en/latest/api.html#memory-backend .. _`dogpile.cache.backends.memcached`: http://dogpilecache.readthedocs.org/en/latest/api.html#memcached-backends .. _`dogpile.cache.backends.redis`: http://dogpilecache.readthedocs.org/en/latest/api.html#redis-backends .. _`dogpile.cache.backends.file`: http://dogpilecache.readthedocs.org/en/latest/api.html#file-backends .. _`ProxyBackends`: http://dogpilecache.readthedocs.org/en/latest/api.html#proxy-backends Certificates for PKI -------------------- PKI stands for Public Key Infrastructure. Tokens are documents, cryptographically signed using the X509 standard. In order to work correctly token generation requires a public/private key pair. The public key must be signed in an X509 certificate, and the certificate used to sign it must be available as Certificate Authority (CA) certificate. 
These files can be either externally generated or generated using the ``keystone-manage`` utility. The files used for signing and verifying certificates are set in the Keystone configuration file. The private key should only be readable by the system user that will run Keystone. The values that specify the certificates are under the ``[signing]`` section of the configuration file. The configuration values are: * ``certfile`` - Location of certificate used to verify tokens. Default is ``/etc/keystone/ssl/certs/signing_cert.pem`` * ``keyfile`` - Location of private key used to sign tokens. Default is ``/etc/keystone/ssl/private/signing_key.pem`` * ``ca_certs`` - Location of certificate for the authority that issued the above certificate. Default is ``/etc/keystone/ssl/certs/ca.pem`` Signing Certificate Issued by External CA ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You may use a signing certificate issued by an external CA instead of generated by ``keystone-manage``. However, certificate issued by external CA must satisfy the following conditions: * all certificate and key files must be in Privacy Enhanced Mail (PEM) format * private key files must not be protected by a password The basic workflow for using a signing certificate issued by an external CA involves: 1. `Request Signing Certificate from External CA`_ 2. Convert certificate and private key to PEM if needed 3. `Install External Signing Certificate`_ Request Signing Certificate from External CA ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ One way to request a signing certificate from an external CA is to first generate a PKCS #10 Certificate Request Syntax (CRS) using OpenSSL CLI. First create a certificate request configuration file (e.g. ``cert_req.conf``): .. 
code-block:: ini [ req ] default_bits = 2048 default_keyfile = keystonekey.pem default_md = default prompt = no distinguished_name = distinguished_name [ distinguished_name ] countryName = US stateOrProvinceName = CA localityName = Sunnyvale organizationName = OpenStack organizationalUnitName = Keystone commonName = Keystone Signing emailAddress = keystone@openstack.org Then generate a CRS with OpenSSL CLI. **Do not encrypt the generated private key. The -nodes option must be used.** For example: .. code-block:: bash $ openssl req -newkey rsa:2048 -keyout signing_key.pem -keyform PEM -out signing_cert_req.pem -outform PEM -config cert_req.conf -nodes If everything is successfully, you should end up with ``signing_cert_req.pem`` and ``signing_key.pem``. Send ``signing_cert_req.pem`` to your CA to request a token signing certificate and make sure to ask the certificate to be in PEM format. Also, make sure your trusted CA certificate chain is also in PEM format. Install External Signing Certificate ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Assuming you have the following already: * ``signing_cert.pem`` - (Keystone token) signing certificate in PEM format * ``signing_key.pem`` - corresponding (non-encrypted) private key in PEM format * ``cacert.pem`` - trust CA certificate chain in PEM format Copy the above to your certificate directory. For example: .. code-block:: bash $ mkdir -p /etc/keystone/ssl/certs $ cp signing_cert.pem /etc/keystone/ssl/certs/ $ cp signing_key.pem /etc/keystone/ssl/certs/ $ cp cacert.pem /etc/keystone/ssl/certs/ $ chmod -R 700 /etc/keystone/ssl/certs **Make sure the certificate directory is root-protected.** If your certificate directory path is different from the default ``/etc/keystone/ssl/certs``, make sure it is reflected in the ``[signing]`` section of the configuration file. Generating a Signing Certificate using pki_setup ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``keystone-manage pki_setup`` is a development tool. 
We recommend that you do not use ``keystone-manage pki_setup`` in a production environment. In production, an external CA should be used instead. This is because the CA secret key should generally be kept apart from the token signing secret keys so that a compromise of a node does not lead to an attacker being able to generate valid signed Keystone tokens. This is a low probability attack vector, as compromise of a Keystone service machine's filesystem security almost certainly means the attacker will be able to gain direct access to the token backend. When using the ``keystone-manage pki_setup`` to generate the certificates, the following configuration options in the ``[signing]`` section are used: * ``ca_key`` - Default is ``/etc/keystone/ssl/private/cakey.pem`` * ``key_size`` - Default is ``2048`` * ``valid_days`` - Default is ``3650`` If ``keystone-manage pki_setup`` is not used then these options don't need to be set. Encryption Keys for Fernet -------------------------- ``keystone-manage fernet_setup`` will attempt to create a key repository as configured in the ``[fernet_tokens]`` section of ``keystone.conf`` and bootstrap it with encryption keys. A single 256-bit key is actually composed of two smaller keys: a 128-bit key used for SHA256 HMAC signing and a 128-bit key used for AES encryption. See the `Fernet token `_ specification for more detail. ``keystone-manage fernet_rotate`` will rotate encryption keys through the following states: * **Staged key**: In a key rotation, a new key is introduced into the rotation in this state. Only one key is considered to be the *staged* key at any given time. This key will become the *primary* during the *next* key rotation. This key is only used to validate tokens and serves to avoid race conditions in multi-node deployments (all nodes should recognize all *primary* keys in the deployment at all times). 
In a multi-node Keystone deployment this would allow for the *staged* key to be replicated to all Keystone nodes before being promoted to *primary* on a single node. This prevents the case where a *primary* key is created on one Keystone node and tokens encrypted/signed with that new *primary* are rejected on another Keystone node because the new *primary* doesn't exist there yet. * **Primary key**: In a key rotation, the old *staged* key is promoted to be the *primary*. Only one key is considered to be the *primary* key at any given time. This is the key used to generate new tokens. This key is also used to validate previously generated tokens. * **Secondary keys**: In a key rotation, the old *primary* key is demoted to be a *secondary* key. *Secondary* keys are only used to validate previously generated tokens. You can maintain any number of *secondary* keys, up to ``[fernet_tokens] max_active_keys`` (where "active" refers to the sum of all recognized keys in any state: *staged*, *primary* or *secondary*). When ``max_active_keys`` is exceeded during a key rotation, the oldest keys are discarded. When a new primary key is created, all new tokens will be encrypted using the new primary key. The old primary key is demoted to a secondary key, which can still be used for validating tokens. Excess secondary keys (beyond ``[fernet_tokens] max_active_keys``) are revoked. Revoked keys are permanently deleted. Rotating keys too frequently, or with ``[fernet_tokens] max_active_keys`` set too low, will cause tokens to become invalid prior to their expiration. Service Catalog --------------- Keystone provides two configuration options for your service catalog. SQL-based Service Catalog (``sql.Catalog``) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A dynamic database-backed driver fully supporting persistent configuration. ``keystone.conf`` example: .. code-block:: ini [catalog] driver = sql .. NOTE:: A `template_file` does not need to be defined for the sql.Catalog driver. 
To build your service catalog using this driver, see the built-in help: .. code-block:: bash $ openstack --help $ openstack help service create $ openstack help endpoint create You can also refer to `an example in Keystone (tools/sample_data.sh) `_. File-based Service Catalog (``templated.Catalog``) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The templated catalog is an in-memory backend initialized from a read-only ``template_file``. Choose this option only if you know that your service catalog will not change very much over time. .. NOTE:: Attempting to change your service catalog against this driver will result in ``HTTP 501 Not Implemented`` errors. This is the expected behavior. If you want to use these commands, you must instead use the SQL-based Service Catalog driver. ``keystone.conf`` example: .. code-block:: ini [catalog] driver = templated template_file = /opt/stack/keystone/etc/default_catalog.templates The value of ``template_file`` is expected to be an absolute path to your service catalog configuration. An example ``template_file`` is included in Keystone, however you should create your own to reflect your deployment. Another such example is `available in devstack (files/default_catalog.templates) `_. Endpoint Filtering enables creation of ad-hoc catalogs for each project-scoped token request. Configure the endpoint filter catalog driver in the ``[catalog]`` section. For example: .. code-block:: ini [catalog] driver = catalog_sql In the ``[endpoint_filter]`` section, set ``return_all_endpoints_if_no_filter`` to ``False`` to return an empty catalog if no associations are made. For example: .. code-block:: ini [endpoint_filter] return_all_endpoints_if_no_filter = False See `API Specification for Endpoint Filtering `_ for the details of API definition. .. NOTE:: Support status for Endpoint Filtering *Experimental* (Icehouse, Juno) *Stable* (Kilo) Logging ------- Logging is configured externally to the rest of Keystone. 
Configure the path to your logging configuration file using the ``[DEFAULT] log_config_append`` option of ``keystone.conf``. If you wish to route all your logging through syslog, set the ``[DEFAULT] use_syslog`` option. A sample ``log_config_append`` file is included with the project at ``etc/logging.conf.sample``. Like other OpenStack projects, Keystone uses the `Python logging module`_, which includes extensive configuration options for choosing the output levels and formats. .. _Paste: http://pythonpaste.org/ .. _`Python logging module`: http://docs.python.org/library/logging.html SSL --- Keystone may be configured to support SSL and 2-way SSL out-of-the-box. The X509 certificates used by Keystone can be generated by ``keystone-manage`` or obtained externally and configured for use with Keystone as described in this section. Here is the description of each of them and their purpose: .. WARNING:: The SSL configuration options available to the eventlet server (``keystone-all``) described here are severely limited. A secure deployment should have Keystone running in a web server (such as Apache httpd), or behind an SSL terminator. When running Keystone in a web server or behind an SSL terminator the options described in this section have no effect and SSL is configured in the web server or SSL terminator. Types of certificates ^^^^^^^^^^^^^^^^^^^^^ * ``cacert.pem``: Certificate Authority chain to validate against. * ``ssl_cert.pem``: Public certificate for Keystone server. * ``middleware.pem``: Public and private certificate for Keystone middleware/client. * ``cakey.pem``: Private key for the CA. * ``ssl_key.pem``: Private key for the Keystone server. Note that you may choose whatever names you want for these certificates, or combine the public/private keys in the same file if you wish. These certificates are just provided as an example. 
Configuration ^^^^^^^^^^^^^ To enable SSL modify the ``etc/keystone.conf`` file under the ``[ssl]`` and ``[eventlet_server_ssl]`` sections. The following is an SSL configuration example using the included sample certificates: .. code-block:: ini [eventlet_server_ssl] enable = True certfile = keyfile = ca_certs = cert_required = False [ssl] ca_key = key_size = 1024 valid_days=3650 cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost * ``enable``: True enables SSL. Defaults to False. * ``certfile``: Path to Keystone public certificate file. * ``keyfile``: Path to Keystone private certificate file. If the private key is included in the certfile, the keyfile may be omitted. * ``ca_certs``: Path to CA trust chain. * ``cert_required``: Requires client certificate. Defaults to False. When generating SSL certificates the following values are read * ``key_size``: Key size to create. Defaults to 1024. * ``valid_days``: How long the certificate is valid for. Defaults to 3650 (10 years). * ``ca_key``: The private key for the CA. Defaults to ``/etc/keystone/ssl/certs/cakey.pem``. * ``cert_subject``: The subject to set in the certificate. Defaults to ``/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost``. When setting the subject it is important to set CN to be the address of the server so client validation will succeed. This generally means having the subject be at least ``/CN=`` Generating SSL certificates ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Certificates for encrypted HTTP communication can be generated by: .. code-block:: bash $ keystone-manage ssl_setup This will create a private key, a public key and a certificate that will be used to encrypt communications with keystone. In the event that a Certificate Authority is not given a testing one will be created. It is likely in a production environment that these certificates will be created and provided externally. Note that ``ssl_setup`` is a development tool and is only recommended for developments environment. 
We do not recommend using ``ssl_setup`` for production environments. User CRUD additions for the V2.0 API ------------------------------------ For the V2.0 API, Keystone provides an additional capability that allows users to use a HTTP PATCH to change their own password. Each user can then change their own password with a HTTP PATCH : .. code-block:: bash $ curl -X PATCH http://localhost:5000/v2.0/OS-KSCRUD/users/ -H "Content-type: application/json" \ -H "X_Auth_Token: " -d '{"user": {"password": "ABCD", "original_password": "DCBA"}}' In addition to changing their password all of the user's current tokens will be revoked. Inherited Role Assignments -------------------------- Keystone provides an optional capability to assign roles on a project or domain that, rather than affect the project or domain itself, are instead inherited to the project subtree or to all projects owned by that domain. This capability is enabled by default, but can be disabled by including the following in ``keystone.conf``: .. code-block:: ini [os_inherit] enabled = False Endpoint Policy --------------- The Endpoint Policy feature provides associations between service endpoints and policies that are already stored in the Identity server and referenced by a policy ID. Configure the endpoint policy backend driver in the ``[endpoint_policy]`` section. For example: .. code-block:: ini [endpoint_policy] driver = sql See `API Specification for Endpoint Policy `_ for the details of API definition. .. NOTE:: Support status for Endpoint Policy *Experimental* (Juno) *Stable* (Kilo) OAuth1 1.0a ----------- The OAuth 1.0a feature provides the ability for Identity users to delegate roles to third party consumers via the OAuth 1.0a specification. To enable OAuth1: 1. Add the oauth1 driver to the ``[oauth1]`` section in ``keystone.conf``. For example: .. code-block:: ini [oauth1] driver = sql 2. Add the ``oauth1`` authentication method to the ``[auth]`` section in ``keystone.conf``: .. 
code-block:: ini [auth] methods = external,password,token,oauth1 3. If deploying under Apache httpd with ``mod_wsgi``, set the `WSGIPassAuthorization` to allow the OAuth Authorization headers to pass through `mod_wsgi`. For example, add the following to the keystone virtual host file: .. code-block:: ini WSGIPassAuthorization On See `API Specification for OAuth 1.0a `_ for the details of API definition. .. NOTE:: Support status for OAuth 1.0a *Experimental* (Havana, Icehouse) *Stable* (Juno) Revocation Events ----------------- The Revocation Events feature provides a list of token revocations. Each event expresses a set of criteria which describes a set of tokens that are no longer valid. Add the revoke backend driver to the ``[revoke]`` section in ``keystone.conf``. For example: .. code-block:: ini [revoke] driver = sql See `API Specification for Revocation Events `_ for the details of API definition. .. NOTE:: Support status for Revocation Events *Experimental* (Juno) *Stable* (Kilo) Token Binding ------------- Token binding refers to the practice of embedding information from external authentication providers (like a company's Kerberos server) inside the token such that a client may enforce that the token only be used in conjunction with that specified authentication. This is an additional security mechanism as it means that if a token is stolen it will not be usable without also providing the external authentication. To activate token binding you must specify the types of authentication that token binding should be used for in ``keystone.conf`` e.g.: .. code-block:: ini [token] bind = kerberos Currently only ``kerberos`` is supported. To enforce checking of token binding the ``enforce_token_bind`` parameter should be set to one of the following modes: * ``disabled`` disable token bind checking * ``permissive`` enable bind checking, if a token is bound to a mechanism that is unknown to the server then ignore it. This is the default. 
* ``strict`` enable bind checking, if a token is bound to a mechanism that is unknown to the server then this token should be rejected. * ``required`` enable bind checking and require that at least 1 bind mechanism is used for tokens. * named enable bind checking and require that the specified authentication mechanism is used. e.g.: .. code-block:: ini [token] enforce_token_bind = kerberos *Do not* set ``enforce_token_bind = named`` as there is not an authentication mechanism called ``named``. Limiting the number of entities returned in a collection -------------------------------------------------------- Keystone provides a method of setting a limit to the number of entities returned in a collection, which is useful to prevent overly long response times for list queries that have not specified a sufficiently narrow filter. This limit can be set globally by setting ``list_limit`` in the default section of ``keystone.conf``, with no limit set by default. Individual driver sections may override this global value with a specific limit, for example: .. code-block:: ini [resource] list_limit = 100 If a response to ``list_{entity}`` call has been truncated, then the response status code will still be 200 (OK), but the ``truncated`` attribute in the collection will be set to ``true``. URL safe naming of projects and domains --------------------------------------- In the future, keystone may offer the ability to identify a project in a hierarchy via a URL style of naming from the root of the hierarchy (for example specifying 'projectA/projectB/projectC' as the project name in an authentication request). In order to prepare for this, keystone supports the optional ability to ensure both projects and domains are named without including any of the reserverd characters specified in section 2.2 of `rfc3986 `_. The safety of the names of projects and domains can be controlled via two configuration options: .. 
code-block:: ini [resource] project_name_url_safe = off domain_name_url_safe = off When set to ``off`` (which is the default), no checking is done on the URL safeness of names. When set to ``new``, an attempt to create a new project or domain with an unsafe name (or update the name of a project or domain to be unsafe) will cause a status code of 400 (Bad Request) to be returned. Setting the configuration option to ``strict`` will, in addition to preventing the creation and updating of entities with unsafe names, cause an authentication attempt which specifies a project or domain name that is unsafe to return a status code of 401 (Unauthorized). It is recommended that installations take the steps necessary to where they can run with both options set to ``strict`` as soon as is practical. Sample Configuration Files -------------------------- The ``etc/`` folder distributed with Keystone contains example configuration files for each Server application. * ``etc/keystone.conf.sample`` * ``etc/keystone-paste.ini`` * ``etc/logging.conf.sample`` * ``etc/default_catalog.templates`` * ``etc/sso_callback_template.html`` .. _`API protection with RBAC`: Keystone API protection with Role Based Access Control (RBAC) ============================================================= Like most OpenStack projects, Keystone supports the protection of its APIs by defining policy rules based on an RBAC approach. These are stored in a JSON policy file, the name and location of which is set in the main Keystone configuration file. Each Keystone v3 API has a line in the policy file which dictates what level of protection is applied to it, where each line is of the form:: : or where: ```` can contain ```` or ```` ```` is a set of identifiers that must match between the token provided by the caller of the API and the parameters or target entities of the API call in question. For example: .. 
code-block:: javascript "identity:create_user": "role:admin and domain_id:%(user.domain_id)s" Indicates that to create a user you must have the admin role in your token and in addition the domain_id in your token (which implies this must be a domain scoped token) must match the domain_id in the user object you are trying to create. In other words, you must have the admin role on the domain in which you are creating the user, and the token you are using must be scoped to that domain. Each component of a match statement is of the form:: : or The following attributes are available * Attributes from token: user_id, the domain_id or project_id depending on the scope, and the list of roles you have within that scope * Attributes related to API call: Any parameters that are passed into the API call are available, along with any filters specified in the query string. Attributes of objects passed can be referenced using an object.attribute syntax (e.g. user.domain_id). The target objects of an API are also available using a target.object.attribute syntax. For instance: .. code-block:: javascript "identity:delete_user": "role:admin and domain_id:%(target.user.domain_id)s" would ensure that the user object that is being deleted is in the same domain as the token provided. Every target object (except token) has an `id` and a `name` available as `target..id` and `target..name`. Other attributes are retrieved from the database and vary between object types. Moreover, some database fields are filtered out (e.g. user passwords). 
List of object attributes: * role: * target.role.domain_id * target.role.id * target.role.name * user: * target.user.default_project_id * target.user.description * target.user.domain_id * target.user.enabled * target.user.id * target.user.name * group: * target.group.description * target.group.domain_id * target.group.id * target.group.name * domain: * target.domain.enabled * target.domain.id * target.domain.name * project: * target.project.description * target.project.domain_id * target.project.enabled * target.project.id * target.project.name * token * target.token.user_id * target.token.user.domain.id The default policy.json file supplied provides a somewhat basic example of API protection, and does not assume any particular use of domains. For multi-domain configuration installations where, for example, a cloud provider wishes to allow administration of the contents of a domain to be delegated, it is recommended that the supplied policy.v3cloudsample.json is used as a basis for creating a suitable production policy file. This example policy file also shows the use of an admin_domain to allow a cloud provider to enable cloud administrators to have wider access across the APIs. A clean installation would need to perhaps start with the standard policy file, to allow creation of the admin_domain with the first users within it. The domain_id of the admin domain would then be obtained and could be pasted into a modified version of policy.v3cloudsample.json which could then be enabled as the main policy file. .. _`prepare your deployment`: Preparing your deployment ========================= Step 1: Configure keystone.conf ------------------------------- Ensure that your ``keystone.conf`` is configured to use a SQL driver: .. code-block:: ini [identity] driver = sql You may also want to configure your ``[database]`` settings to better reflect your environment: .. code-block:: ini [database] connection = sqlite:///keystone.db idle_timeout = 200 .. 
NOTE:: It is important that the database that you specify be different from the one containing your existing install. Step 2: Sync your new, empty database ------------------------------------- You should now be ready to initialize your new database without error, using: .. code-block:: bash $ keystone-manage db_sync To test this, you should now be able to start ``keystone-all`` and use the OpenStack Client to list your projects (which should successfully return an empty list from your new database): .. code-block:: bash $ openstack --os-token ADMIN --os-url http://127.0.0.1:35357/v2.0/ project list .. NOTE:: We're providing the default OS_TOKEN and OS_URL values from ``keystone.conf`` to connect to the Keystone service. If you changed those values, or deployed Keystone to a different endpoint, you will need to change the provided command accordingly. Initializing Keystone ===================== ``keystone-manage`` is designed to execute commands that cannot be administered through the normal REST API. At the moment, the following calls are supported: * ``db_sync``: Sync the database. * ``db_version``: Print the current migration version of the database. * ``domain_config_upload``: Upload domain configuration file. * ``fernet_rotate``: Rotate keys in the Fernet key repository. * ``fernet_setup``: Setup a Fernet key repository. * ``mapping_engine``: Test your federation mapping rules. * ``mapping_purge``: Purge the identity mapping table. * ``pki_setup``: Initialize the certificates used to sign tokens. * ``saml_idp_metadata``: Generate identity provider metadata. * ``ssl_setup``: Generate certificates for SSL. * ``token_flush``: Purge expired tokens Invoking ``keystone-manage`` by itself will give you additional usage information. The private key used for token signing can only be read by its owner. This prevents unauthorized users from spuriously signing tokens. 
``keystone-manage pki_setup`` Should be run as the same system user that will be running the Keystone service to ensure proper ownership for the private key file and the associated certificates. Adding Users, Projects, and Roles via Command Line Interfaces ============================================================= Keystone APIs are protected by the rules in the policy file. The default policy rules require admin credentials to administer ``users``, ``projects``, and ``roles``. See section `Keystone API protection with Role Based Access Control (RBAC)`_ for more details on policy files. The Keystone command line interface packaged in `python-keystoneclient`_ only supports the Identity v2.0 API. The OpenStack common command line interface packaged in `python-openstackclient`_ supports both v2.0 and v3 APIs. With both command line interfaces there are two ways to configure the client to use admin credentials, using either an existing token or password credentials. .. NOTE:: As of the Juno release, it is recommended to use ``python-openstackclient``, as it supports both v2.0 and v3 APIs. For the purpose of backwards compatibility, the CLI packaged in ``python-keystoneclient`` is not being removed. .. _`python-openstackclient`: http://docs.openstack.org/developer/python-openstackclient/ .. _`python-keystoneclient`: http://docs.openstack.org/developer/python-keystoneclient/ Authenticating with a Token --------------------------- .. NOTE:: If your Keystone deployment is brand new, you will need to use this authentication method, along with your ``[DEFAULT] admin_token``. To authenticate with Keystone using a token and ``python-openstackclient``, set the following flags. 
* ``--os-url OS_URL``: Keystone endpoint the user communicates with * ``--os-token OS_TOKEN``: User's service token To administer a Keystone endpoint, your token should be either belong to a user with the ``admin`` role, or, if you haven't created one yet, should be equal to the value defined by ``[DEFAULT] admin_token`` in your ``keystone.conf``. You can also set these variables in your environment so that they do not need to be passed as arguments each time: .. code-block:: bash $ export OS_URL=http://localhost:35357/v2.0 $ export OS_TOKEN=ADMIN Instead of ``python-openstackclient``, if using ``python-keystoneclient``, set the following: * ``--os-endpoint OS_SERVICE_ENDPOINT``: equivalent to ``--os-url OS_URL`` * ``--os-service-token OS_SERVICE_TOKEN``: equivalent to ``--os-token OS_TOKEN`` Authenticating with a Password ------------------------------ To authenticate with Keystone using a password and ``python-openstackclient``, set the following flags, note that the following user referenced below should be granted the ``admin`` role. * ``--os-username OS_USERNAME``: Name of your user * ``--os-password OS_PASSWORD``: Password for your user * ``--os-project-name OS_PROJECT_NAME``: Name of your project * ``--os-auth-url OS_AUTH_URL``: URL of the Keystone authentication server You can also set these variables in your environment so that they do not need to be passed as arguments each time: .. code-block:: bash $ export OS_USERNAME=my_username $ export OS_PASSWORD=my_password $ export OS_PROJECT_NAME=my_project $ export OS_AUTH_URL=http://localhost:35357/v2.0 If using ``python-keystoneclient``, set the following instead: * ``--os-tenant-name OS_TENANT_NAME``: equivalent to ``--os-project-name OS_PROJECT_NAME`` Example usage ------------- ``python-openstackclient`` is set up to expect commands in the general form of: .. code-block:: bash $ openstack [] [] [] For example, the commands ``user list`` and ``project create`` can be invoked as follows: .. 
code-block:: bash # Using token authentication, with environment variables $ export OS_URL=http://127.0.0.1:35357/v2.0/ $ export OS_TOKEN=secrete_token $ openstack user list $ openstack project create demo # Using token authentication, with flags $ openstack --os-token=secrete --os-url=http://127.0.0.1:35357/v2.0/ user list $ openstack --os-token=secrete --os-url=http://127.0.0.1:35357/v2.0/ project create demo # Using password authentication, with environment variables $ export OS_USERNAME=admin $ export OS_PASSWORD=secrete $ export OS_PROJECT_NAME=admin $ export OS_AUTH_URL=http://localhost:35357/v2.0 $ openstack user list $ openstack project create demo # Using password authentication, with flags $ openstack --os-username=admin --os-password=secrete --os-project-name=admin --os-auth-url=http://localhost:35357/v2.0 user list $ openstack --os-username=admin --os-password=secrete --os-project-name=admin --os-auth-url=http://localhost:35357/v2.0 project create demo Removing Expired Tokens ======================= In the SQL backend expired tokens are not automatically removed. These tokens can be removed with: .. code-block:: bash $ keystone-manage token_flush The memcache backend automatically discards expired tokens and so flushing is unnecessary and if attempted will fail with a NotImplemented error. Configuring the LDAP Identity Provider ====================================== As an alternative to the SQL Database backing store, Keystone can use a directory server to provide the Identity service. 
An example Schema for OpenStack would look like this:: dn: dc=openstack,dc=org dc: openstack objectClass: dcObject objectClass: organizationalUnit ou: openstack dn: ou=Projects,dc=openstack,dc=org objectClass: top objectClass: organizationalUnit ou: groups dn: ou=Users,dc=openstack,dc=org objectClass: top objectClass: organizationalUnit ou: users dn: ou=Roles,dc=openstack,dc=org objectClass: top objectClass: organizationalUnit ou: roles The corresponding entries in the Keystone configuration file are: .. code-block:: ini [ldap] url = ldap://localhost user = dc=Manager,dc=openstack,dc=org password = badpassword suffix = dc=openstack,dc=org use_dumb_member = False allow_subtree_delete = False user_tree_dn = ou=Users,dc=openstack,dc=org user_objectclass = inetOrgPerson The default object classes and attributes are intentionally simplistic. They reflect the common standard objects according to the LDAP RFCs. However, in a live deployment, the correct attributes can be overridden to support a preexisting, more complex schema. For example, in the user object, the objectClass posixAccount from RFC2307 is very common. If this is the underlying objectclass, then the *uid* field should probably be *uidNumber* and *username* field either *uid* or *cn*. To change these two fields, the corresponding entries in the Keystone configuration file are: .. code-block:: ini [ldap] user_id_attribute = uidNumber user_name_attribute = cn There is a set of allowed actions per object type that you can modify depending on your specific deployment. For example, the users are managed by another tool and you have only read access, in such case the configuration is: .. code-block:: ini [ldap] user_allow_create = False user_allow_update = False user_allow_delete = False There are some configuration options for filtering users, tenants and roles, if the backend is providing too much output, in such case the configuration will look like: .. 
code-block:: ini [ldap] user_filter = (memberof=CN=openstack-users,OU=workgroups,DC=openstack,DC=org) In case that the directory server does not have an attribute enabled of type boolean for the user, there are several configuration parameters that can be used to extract the value from an integer attribute like in Active Directory: .. code-block:: ini [ldap] user_enabled_attribute = userAccountControl user_enabled_mask = 2 user_enabled_default = 512 In this case the attribute is an integer and the enabled attribute is listed in bit 1, so if the configured *user_enabled_mask* is different from 0, it gets the value from the field *user_enabled_attribute* and it performs a bitwise AND operation with the value indicated in *user_enabled_mask* and if the value matches the mask then the account is disabled. It also saves the value without mask to the user identity in the attribute *enabled_nomask*. This is needed in order to set it back in case that we need to change it to enable/disable a user because it contains more information than the status like password expiration. The last setting, *user_enabled_default*, is needed in order to create a default value on the integer attribute (512 = NORMAL ACCOUNT on AD). In case of Active Directory the classes and attributes may not match the specified classes in the LDAP module so you can configure them like: .. code-block:: ini [ldap] user_objectclass = person user_id_attribute = cn user_name_attribute = cn user_description_attribute = displayName user_mail_attribute = mail user_enabled_attribute = userAccountControl user_enabled_mask = 2 user_enabled_default = 512 user_attribute_ignore = tenant_id,tenants Debugging LDAP -------------- For additional information on LDAP connections, performance (such as slow response time), or field mappings, setting ``debug_level`` in the [ldap] section is used to enable debugging: .. code-block:: ini debug_level = 4095 This setting in turn sets OPT_DEBUG_LEVEL in the underlying python library. 
This field is a bit mask (integer), and the possible flags are documented in the OpenLDAP manpages. Commonly used values include 255 and 4095, with 4095 being more verbose. .. WARNING:: Enabling ``debug_level`` will negatively impact performance. Enabled Emulation ----------------- Some directory servers do not provide any enabled attribute. For these servers, the ``user_enabled_emulation`` attribute has been created. It is enabled by setting the respective flags to True. Then the attribute ``user_enabled_emulation_dn`` may be set to specify how the enabled users are selected. This attribute works by using a ``groupOfNames`` entry and adding whichever users or groups that you want enabled to the respective group with the ``member`` attribute. For example, this will mark any user who is a member of ``enabled_users`` as enabled: .. code-block:: ini [ldap] user_enabled_emulation = True user_enabled_emulation_dn = cn=enabled_users,cn=groups,dc=openstack,dc=org The default value for the user enabled emulation DN is ``cn=enabled_users,$user_tree_dn``. If a different LDAP schema is used for group membership, it is possible to use the ``group_objectclass`` and ``group_member_attribute`` attributes to determine membership in the enabled emulation group by setting the ``user_enabled_emulation_use_group_config`` attribute to True. Secure Connection ----------------- If you are using a directory server to provide the Identity service, it is strongly recommended that you utilize a secure connection from Keystone to the directory server. In addition to supporting LDAP, Keystone also provides Transport Layer Security (TLS) support. There are some basic configuration options for enabling TLS, identifying a single file or directory that contains certificates for all the Certificate Authorities that the Keystone LDAP client will recognize, and declaring what checks the client should perform on server certificates. This functionality can easily be configured as follows: .. 
code-block:: ini [ldap] use_tls = True tls_cacertfile = /etc/keystone/ssl/certs/cacert.pem tls_cacertdir = /etc/keystone/ssl/certs/ tls_req_cert = demand A few points worth mentioning regarding the above options. If both tls_cacertfile and tls_cacertdir are set then tls_cacertfile will be used and tls_cacertdir is ignored. Furthermore, valid options for tls_req_cert are demand, never, and allow. These correspond to the standard options permitted by the TLS_REQCERT TLS option. Read Only LDAP -------------- Many environments typically have user and group information in directories that are accessible by LDAP. This information is for read-only use in a wide array of applications. Prior to the Havana release, we could not deploy Keystone with read-only directories as backends because Keystone also needed to store information such as projects, roles, domains and role assignments into the directories in conjunction with reading user and group information. Keystone now provides an option whereby these read-only directories can be easily integrated as it now enables its identity entities (which comprises users, groups, and group memberships) to be served out of directories while resource (which comprises projects and domains), assignment and role entities are to be served from different Keystone backends (i.e. SQL). To enable this option, you must have the following ``keystone.conf`` options set: .. code-block:: ini [identity] driver = ldap [resource] driver = sql [assignment] driver = sql [role] driver = sql With the above configuration, Keystone will only lookup identity related information such users, groups, and group membership from the directory, while resources, roles and assignment related information will be provided by the SQL backend. Also note that if there is an LDAP Identity, and no resource, assignment or role backend is specified, they will default to LDAP. Although this may seem counter intuitive, it is provided for backwards compatibility. 
Nonetheless, the explicit option will always override the implicit option, so specifying the options as shown above will always be correct. Finally, it is also worth noting that whether or not the LDAP accessible directory is to be considered read only is still configured as described in a previous section above by setting values such as the following in the ``[ldap]`` configuration section: .. code-block:: ini [ldap] user_allow_create = False user_allow_update = False user_allow_delete = False .. NOTE:: While having identity related information backed by LDAP while other information is backed by SQL is a supported configuration, as shown above; the opposite is not true. If either resource or assignment drivers are configured for LDAP, then Identity must also be configured for LDAP. Connection Pooling ------------------ Various LDAP backends in Keystone use a common LDAP module to interact with LDAP data. By default, a new connection is established for each LDAP operation. This can become highly expensive when TLS support is enabled, which is a likely configuration in an enterprise setup. Reuse of connectors from a connection pool drastically reduces overhead of initiating a new connection for every LDAP operation. Keystone provides connection pool support via configuration. This will keep LDAP connectors alive and reused for subsequent LDAP operations. The connection lifespan is configurable as other pooling specific attributes. In the LDAP identity driver, Keystone authenticates end users via an LDAP bind with the user's DN and provided password. This kind of authentication bind can fill up the pool pretty quickly, so a separate pool is provided for end user authentication bind calls. If a deployment does not want to use a pool for those binds, then it can disable pooling selectively by setting ``use_auth_pool`` to false. If a deployment wants to use a pool for those authentication binds, then ``use_auth_pool`` needs to be set to true. 
For the authentication pool, a different pool size (``auth_pool_size``) and connection lifetime (``auth_pool_connection_lifetime``) can be specified. With an enabled authentication pool, its connection lifetime should be kept short so that the pool frequently re-binds the connection with the provided credentials and works reliably in the end user password change case. When ``use_pool`` is false (disabled), then the authentication pool configuration is also not used. Connection pool configuration is part of the ``[ldap]`` configuration section: .. code-block:: ini [ldap] # Enable LDAP connection pooling. (boolean value) use_pool=false # Connection pool size. (integer value) pool_size=10 # Maximum count of reconnect trials. (integer value) pool_retry_max=3 # Time span in seconds to wait between two reconnect trials. # (floating point value) pool_retry_delay=0.1 # Connector timeout in seconds. Value -1 indicates indefinite wait for # response. (integer value) pool_connection_timeout=-1 # Connection lifetime in seconds. (integer value) pool_connection_lifetime=600 # Enable LDAP connection pooling for end user authentication. If use_pool # is disabled, then this setting is meaningless and is not used at all. # (boolean value) use_auth_pool=false # End user auth connection pool size. (integer value) auth_pool_size=100 # End user auth connection lifetime in seconds. (integer value) auth_pool_connection_lifetime=60 Specifying Multiple LDAP servers -------------------------------- Multiple LDAP server URLs can be provided to keystone to provide high-availability support for a single LDAP backend. To specify multiple LDAP servers, simply change the ``url`` option in the ``[ldap]`` section. The new option should list the different servers, each separated by a comma. For example: .. 
code-block:: ini [ldap] url = "ldap://localhost,ldap://backup.localhost" keystone-9.0.0/doc/source/online_schema_migration_examples.rst0000664000567000056710000000220312701407102026063 0ustar jenkinsjenkins00000000000000 ==================================== Online SQL schema migration examples ==================================== This document links to several examples implementing online SQL schema migrations to facilitate simultaneously running OpenStack services in different versions with the same DB schema. * Nova `data migration example `_ * Nova `data migration enforcement example `_ of sqlalchemy migrate/deprecated scripts * Nova `flavor migration spec `_ example of data migrations in the object layer * Cinder `online schema upgrades spec `_ example of migrating a column to a many-to-many relation table For documentation on how to make online migrations move on to :ref:`Database Schema Migrations `. keystone-9.0.0/doc/source/middlewarearchitecture.rst0000664000567000056710000000255312701407102024040 0ustar jenkinsjenkins00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================= Middleware Architecture ======================= Abstract ======== The Keystone middleware architecture supports a common authentication protocol in use between the OpenStack projects. 
By using Keystone as a common authentication and authorization mechanism, the OpenStack project can plug in to existing authentication and authorization systems in use by existing environments. The auth_token middleware is no longer hosted in Keystone and has moved to the keystonemiddleware project. The `documentation regarding authentication middleware`_ can be found there. .. _`documentation regarding authentication middleware`: http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html keystone-9.0.0/doc/source/federation/0000775000567000056710000000000012701407246020712 5ustar jenkinsjenkins00000000000000keystone-9.0.0/doc/source/federation/mellon.rst0000664000567000056710000001072612701407102022727 0ustar jenkinsjenkins00000000000000:orphan: .. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================== Setup Mellon (mod_auth_mellon) ============================== Configure Apache HTTPD for mod_auth_mellon ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Follow the steps outlined at: `Running Keystone in HTTPD`_. .. _`Running Keystone in HTTPD`: ../apache-httpd.html You'll also need to install the Apache module `mod_auth_mellon `_. For example: .. 
code-block:: bash $ apt-get install libapache2-mod-auth-mellon Configure your Keystone virtual host and adjust the config to properly handle SAML2 workflow: Add *WSGIScriptAlias* directive to your vhost configuration:: WSGIScriptAliasMatch ^(/v3/OS-FEDERATION/identity_providers/.*?/protocols/.*?/auth)$ /var/www/keystone/main/$1 Make sure the *wsgi-keystone.conf* contains a ** directive for the Mellon module and a ** directive for each identity provider:: MellonEnable "info" MellonSPPrivateKeyFile /etc/httpd/mellon/http_keystone.fqdn.key MellonSPCertFile /etc/httpd/mellon/http_keystone.fqdn.cert MellonSPMetadataFile /etc/httpd/mellon/http_keystone.fqdn.xml MellonIdPMetadataFile /etc/httpd/mellon/idp-metadata.xml MellonEndpointPath /v3/OS-FEDERATION/identity_providers/idp_1/protocols/saml2/auth/mellon MellonIdP "IDP" AuthType "Mellon" MellonEnable "auth" .. NOTE:: * See below for information about how to generate the values for the `MellonSPMetadataFile`, etc. directives. * ``saml2`` may be different in your deployment, but do not use a wildcard value. Otherwise *every* federated protocol will be handled by Mellon. * ``idp_1`` has to be replaced with the name associated with the IdP in Keystone. * You are advised to carefully examine `mod_auth_mellon Apache configuration documentation `_ Enable the Keystone virtual host, for example: .. code-block:: bash $ a2ensite wsgi-keystone.conf Enable the ``ssl`` and ``auth_mellon`` modules, for example: .. code-block:: bash $ a2enmod ssl $ a2enmod auth_mellon Restart the Apache instance that is serving Keystone, for example: .. code-block:: bash $ service apache2 restart Configuring the Mellon SP Metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Mellon provides a script called ``mellon_create_metadata.sh`` which generates the values for the config directives `MellonSPPrivateKeyFile`, `MellonSPCertFile`, and `MellonSPMetadataFile`. It is run like this: .. 
code-block:: bash $ mellon_create_metadata.sh http://keystone.fqdn:5000 \ http://keystone.fqdn:5000/v3/OS-FEDERATION/identity_providers/idp_1/protocols/saml2/auth/mellon The first parameter is used as the entity ID, a unique identifier for this Keystone SP. You do not have to use the URL, but it is an easy way to uniquely identify each Keystone SP. The second parameter is the full URL for the endpoint path corresponding to the parameter `MellonEndpointPath`. Fetch your Identity Provider's Metadata file. This corresponds to the value of the `MellonIdPMetadataFile` directive above. For example: .. code-block:: bash $ wget --cacert /path/to/ca.crt -O /etc/httpd/mellon/idp-metadata.xml \ https://idp.fqdn/idp/saml2/metadata Upload your Service Provider's Metadata file to your Identity Provider. This is the file used as the value of the `MellonSPMetadataFile` in the config, generated by the `mellon_create_metadata.sh` script. The IdP may provide a webpage where you can upload the file, or you may be required to submit the file using `wget` or `curl`. Please check your IdP documentation for details. Once you are done, restart the Apache instance that is serving Keystone, for example: .. code-block:: bash $ service apache2 restart keystone-9.0.0/doc/source/federation/shibboleth.rst0000664000567000056710000002414012701407102023557 0ustar jenkinsjenkins00000000000000:orphan: .. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
================ Setup Shibboleth ================ Configure Apache HTTPD for mod_shibboleth ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Follow the steps outlined at: `Running Keystone in HTTPD`_. .. _`Running Keystone in HTTPD`: ../apache-httpd.html You'll also need to install `Shibboleth `_, for example: .. code-block:: bash $ apt-get install libapache2-mod-shib2 Configure your Keystone virtual host and adjust the config to properly handle SAML2 workflow: Add *WSGIScriptAlias* directive to your vhost configuration:: WSGIScriptAliasMatch ^(/v3/OS-FEDERATION/identity_providers/.*?/protocols/.*?/auth)$ /var/www/keystone/main/$1 Make sure the *wsgi-keystone.conf* contains a ** directive for the Shibboleth module and a ** directive for each identity provider:: SetHandler shib ShibRequestSetting requireSession 1 ShibRequestSetting applicationId idp_1 AuthType shibboleth ShibExportAssertion Off Require valid-user ShibRequireSession On ShibRequireAll On .. NOTE:: * ``saml2`` may be different in your deployment, but do not use a wildcard value. Otherwise *every* federated protocol will be handled by Shibboleth. * ``idp_1`` has to be replaced with the name associated with the idp in Keystone. The same name is used inside the shibboleth2.xml configuration file but they could be different. * The ``ShibRequireSession`` and ``ShibRequireAll`` rules are invalid in Apache 2.4+. * You are advised to carefully examine `Shibboleth Apache configuration documentation `_ Enable the Keystone virtual host, for example: .. code-block:: bash $ a2ensite wsgi-keystone.conf Enable the ``ssl`` and ``shib2`` modules, for example: .. code-block:: bash $ a2enmod ssl $ a2enmod shib2 Restart Apache, for example: .. code-block:: bash $ service apache2 restart Configuring shibboleth2.xml ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Once you have your Keystone vhost (virtual host) ready, it's then time to configure Shibboleth and upload your Metadata to the Identity Provider. 
If new certificates are required, they can be easily created by executing: .. code-block:: bash $ shib-keygen -y The newly created file will be stored under ``/etc/shibboleth/sp-key.pem`` You should fetch your Service Provider's Metadata file. Typically this can be achieved by simply fetching a Metadata file, for example: .. code-block:: bash $ wget --no-check-certificate -O metadata.xml https://service.example.org/Shibboleth.sso/Metadata Upload your Service Provider's Metadata file to your Identity Provider. This step depends on your Identity Provider choice and is not covered here. Configure your Service Provider by editing ``/etc/shibboleth/shibboleth2.xml`` file. You are advised to examine `Shibboleth Service Provider Configuration documentation `_ An example of your ``/etc/shibboleth/shibboleth2.xml`` may look like (The example shown below is for reference only, not to be used in a production environment): .. code-block:: xml SAML2 SAML1 SAML2 Local SAML2 SAML1 SAML2 Local SAML2 SAML1 SAML2 Local Keystone enforces `external authentication`_ when the ``REMOTE_USER`` environment variable is present so make sure Shibboleth doesn't set the ``REMOTE_USER`` environment variable. To do so, scan through the ``/etc/shibboleth/shibboleth2.xml`` configuration file and remove the ``REMOTE_USER`` directives. Examine your attributes map file ``/etc/shibboleth/attribute-map.xml`` and adjust your requirements if needed. For more information see `attributes documentation `_ Once you are done, restart your Shibboleth daemon: .. _`external authentication`: ../external-auth.html .. code-block:: bash $ service shibd restart $ service apache2 restart keystone-9.0.0/doc/source/federation/openidc.rst0000664000567000056710000000634012701407102023057 0ustar jenkinsjenkins00000000000000:orphan: .. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================== Setup OpenID Connect ==================== Configuring mod_auth_openidc ============================ Federate Keystone (SP) and an external IdP using OpenID Connect (`mod_auth_openidc`_) .. _`mod_auth_openidc`: https://github.com/pingidentity/mod_auth_openidc To install `mod_auth_openidc` on Ubuntu, perform the following: .. code-block:: bash sudo apt-get install libapache2-mod-auth-openidc This module is available for other distributions (Fedora/CentOS/Red Hat) from: https://github.com/pingidentity/mod_auth_openidc/releases In the keystone Apache site file, add the following as a top level option, to load the `mod_auth_openidc` module: .. code-block:: xml LoadModule auth_openidc_module /usr/lib/apache2/modules/mod_auth_openidc.so Also within the same file, locate the virtual host entry and add the following entries for OpenID Connect: .. code-block:: xml ... 
OIDCClaimPrefix "OIDC-" OIDCResponseType "id_token" OIDCScope "openid email profile" OIDCProviderMetadataURL OIDCClientID OIDCClientSecret OIDCCryptoPassphrase openstack OIDCRedirectURI http://localhost:5000/v3/OS-FEDERATION/identity_providers//protocols/oidc/auth/redirect AuthType openid-connect Require valid-user LogLevel debug Note an example of an `OIDCProviderMetadataURL` instance is: https://accounts.google.com/.well-known/openid-configuration If not using `OIDCProviderMetadataURL`, then the following attributes must be specified: `OIDCProviderIssuer`, `OIDCProviderAuthorizationEndpoint`, `OIDCProviderTokenEndpoint`, `OIDCProviderTokenEndpointAuth`, `OIDCProviderUserInfoEndpoint`, and `OIDCProviderJwksUri` Note, if using a mod_wsgi version less than 4.3.0, then the `OIDCClaimPrefix` must be specified to have only alphanumerics or a dash ("-"). This is because mod_wsgi blocks headers that do not fit this criteria. See http://modwsgi.readthedocs.org/en/latest/release-notes/version-4.3.0.html#bugs-fixed for more details Once you are done, restart your Apache daemon: .. code-block:: bash $ service apache2 restart Tips ==== 1. When creating a mapping, note that the 'remote' attributes will be prefixed, with `HTTP_`, so for instance, if you set OIDCClaimPrefix to `OIDC-`, then a typical remote value to check for is: `HTTP_OIDC_ISS`. 2. Don't forget to add oidc as an [auth] plugin in keystone.conf, see `Step 2`_ .. _`Step 2`: federation/federation.html keystone-9.0.0/doc/source/federation/websso.rst0000664000567000056710000002357612701407102022752 0ustar jenkinsjenkins00000000000000:orphan: .. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================== Keystone Federation and Horizon =============================== Keystone Changes ================ 1. Update `trusted_dashboard` in keystone.conf. Specify URLs of trusted horizon servers. This value may be repeated multiple times. This setting ensures that keystone only sends token data back to trusted servers. This is performed as a precaution, specifically to prevent man-in-the-middle (MITM) attacks. .. code-block:: ini [federation] trusted_dashboard = http://acme.horizon.com/auth/websso/ trusted_dashboard = http://beta.horizon.com/auth/websso/ 2. Update httpd vhost file with websso information. The `/v3/auth/OS-FEDERATION/websso/` and `/v3/auth/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}/websso` routes must be protected by the chosen httpd module. This is performed so the request that originates from horizon will use the same identity provider that is configured in keystone. .. WARNING:: By using the IdP specific route, a user will no longer leverage the Remote ID of a specific Identity Provider, and will be unable to verify that the Identity Provider is trusted, the mapping will remain as the only means to controlling authorization. If `mod_shib` is used, then use the following as an example: .. code-block:: xml ... AuthType shibboleth Require valid-user ... AuthType shibboleth Require valid-user ... If `mod_auth_openidc` is used, then use the following as an example: .. 
code-block:: xml OIDCRedirectURI http://localhost:5000/v3/auth/OS-FEDERATION/websso/redirect OIDCRedirectURI http://localhost:5000/v3/auth/OS-FEDERATION/identity_providers/idp_1/protocol/oidc/websso/redirect ... AuthType openid-connect Require valid-user ... AuthType openid-connect Require valid-user ... If `mod_auth_kerb` is used, then use the following as an example: .. code-block:: xml ... AuthType Kerberos AuthName "Acme Corporation" KrbMethodNegotiate on KrbMethodK5Passwd off Krb5Keytab /etc/apache2/http.keytab ... AuthType Kerberos AuthName "Acme Corporation" KrbMethodNegotiate on KrbMethodK5Passwd off Krb5Keytab /etc/apache2/http.keytab ... If `mod_auth_mellon` is used, then use the following as an example: .. code-block:: xml ... AuthType Mellon MellonEnable auth Require valid-user ... AuthType Mellon MellonEnable auth Require valid-user ... .. NOTE:: If you are also using SSO via the API, don't forget to make the Location settings match your configuration used for the keystone identity provider location: `/v3/OS-FEDERATION/identity_providers//protocols//auth` 3. Update `remote_id_attribute` in keystone.conf. A remote id attribute indicates the header to retrieve from the WSGI environment. This header contains information about the identity of the identity provider. For `mod_shib` this would be ``Shib-Identity-Provider``, for `mod_auth_openidc`, this could be ``HTTP_OIDC_ISS``. For `mod_auth_mellon`, this could be ``MELLON_IDP``. It is recommended that this option be set on a per-protocol basis. .. code-block:: ini [saml2] remote_id_attribute = Shib-Identity-Provider [oidc] remote_id_attribute = HTTP_OIDC_ISS Alternatively, a generic option may be set at the `[federation]` level. .. code-block:: ini [federation] remote_id_attribute = HTTP_OIDC_ISS 4. Set `remote_ids` for a keystone identity provider using the API or CLI. 
A keystone identity provider may have multiple `remote_ids` specified, this allows the same *keystone* identity provider resource to be used with multiple external identity providers. For example, an identity provider resource ``university-idp``, may have the following `remote_ids`: ``['university-x', 'university-y', 'university-z']``. This removes the need to configure N identity providers in keystone. This can be performed using the `OS-FEDERATION API`_: ``PATCH /OS-FEDERATION/identity_providers/{idp_id}`` Or by using the `OpenStackClient CLI`_: .. code-block:: bash $ openstack identity provider set --remote-id .. NOTE:: Remote IDs are globally unique. Two identity providers cannot be associated with the same remote ID. Once authenticated with the external identity provider, keystone will determine which identity provider and mapping to use based on the protocol and the value returned from the `remote_id_attribute` key. For example, if our identity provider is ``google``, the mapping used is ``google_mapping`` and the protocol is ``oidc``. The identity provider's remote IDs would be: [``accounts.google.com``]. The `remote_id_attribute` value may be set to ``HTTP_OIDC_ISS``, since this value will always be ``accounts.google.com``. The motivation for this approach is that there will always be some data sent by the identity provider (in the assertion or claim) that uniquely identifies the identity provider. This removes the requirement for horizon to list all the identity providers that are trusted by keystone. .. _`OpenStackClient CLI`: http://docs.openstack.org/developer/python-openstackclient/command-objects/identity-provider.html#identity-provider-set .. _`OS-FEDERATION API`: http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html#update-identity-provider Horizon Changes =============== .. NOTE:: Django OpenStack Auth version 1.2.0 or higher is required for these steps. 
Identity provider and federation protocol specific webSSO is only available in Django OpenStack Auth version 2.0.0 or higher. 1. Set the Identity Service version to 3 Ensure the `OPENSTACK_API_VERSIONS` option in horizon's local_settings.py has been updated to indicate that the `identity` version to use is `3`. .. code-block:: python OPENSTACK_API_VERSIONS = { "identity": 3, } 2. Authenticate against Identity Server v3. Ensure the `OPENSTACK_KEYSTONE_URL` option in horizon's local_settings.py has been updated to point to a v3 URL. .. code-block:: python OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v3" 3. Set the `WEBSSO_ENABLED` option. Ensure the `WEBSSO_ENABLED` option is set to True in horizon's local_settings.py file, this will provide users with an updated login screen for horizon. .. code-block:: python WEBSSO_ENABLED = True 4. (Optional) Create a list of authentication methods with the `WEBSSO_CHOICES` option. Within horizon's settings.py file, a list of supported authentication methods can be specified. The list includes Keystone federation protocols such as OpenID Connect and SAML, and also keys that map to specific identity provider and federation protocol combinations (as defined in `WEBSSO_IDP_MAPPING`). With the exception of ``credentials`` which is reserved by horizon, and maps to the user name and password used by keystone's identity backend. .. code-block:: python WEBSSO_CHOICES = ( ("credentials", _("Keystone Credentials")), ("oidc", _("OpenID Connect")), ("saml2", _("Security Assertion Markup Language")), ("idp_1_oidc", "Acme Corporation - OpenID Connect"), ("idp_1_saml2", "Acme Corporation - SAML2") ) 5. (Optional) Create a dictionary of specific identity provider and federation protocol combinations. A dictionary of specific identity provider and federation protocol combinations. From the selected authentication mechanism, the value will be looked up as keys in the dictionary. 
If a match is found, it will redirect the user to a identity provider and federation protocol specific WebSSO endpoint in keystone, otherwise it will use the value as the protocol_id when redirecting to the WebSSO by protocol endpoint. .. code-block:: python WEBSSO_IDP_MAPPING = { "idp_1_oidc": ("idp_1", "oidc"), "idp_1_saml2": ("idp_1", "saml2") } .. NOTE:: The value is expected to be a tuple formatted as: (, ). 6. (Optional) Specify an initial choice with the `WEBSSO_INITIAL_CHOICE` option. The list set by the `WEBSSO_CHOICES` option will be generated in a drop-down menu in the login screen. The setting `WEBSSO_INITIAL_CHOICE` will automatically set that choice to be highlighted by default. .. code-block:: python WEBSSO_INITIAL_CHOICE = "credentials" keystone-9.0.0/doc/source/developing.rst0000664000567000056710000011067412701407105021463 0ustar jenkinsjenkins00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================== Developing with Keystone ======================== Setup ----- Get your development environment set up according to :doc:`devref/development.environment`. It is recommended that you install Keystone into a virtualenv. Configuring Keystone -------------------- Keystone requires a configuration file. There is a sample configuration file that can be used to get started: .. 
code-block:: bash $ cp etc/keystone.conf.sample etc/keystone.conf The defaults are enough to get you going, but you can make any changes if needed. Running Keystone ---------------- To run the Keystone Admin and API server instances, use: .. code-block:: bash $ keystone-all This runs Keystone with the configuration in the etc/ directory of the project. See :doc:`configuration` for details on how Keystone is configured. By default, Keystone is configured with SQL backends. Interacting with Keystone ------------------------- You can interact with Keystone through the command line using :doc:`man/keystone-manage` which allows you to initialize keystone, etc. You can also interact with Keystone through its REST API. There is a Python Keystone client library `python-keystoneclient`_ which interacts exclusively through the REST API, and which Keystone itself uses to provide its command-line interface. When initially getting set up, after you've configured which databases to use, you're probably going to need to run the following to put your database schema in place: .. code-block:: bash $ keystone-manage db_sync .. _`python-keystoneclient`: https://git.openstack.org/cgit/openstack/python-keystoneclient .. _`openstackclient`: https://git.openstack.org/cgit/openstack/python-openstackclient If the above commands result in a ``KeyError``, or they fail on a ``.pyc`` file with the message, ``You can only have one Python script per version``, then it is possible that there are out-of-date compiled Python bytecode files in the Keystone directory tree that are causing problems. This can occur if you have previously installed and run older versions of Keystone. These out-of-date files can be easily removed by running a command like the following from the Keystone root project directory: .. code-block:: bash $ find . -name "*.pyc" -delete Database Schema Migrations -------------------------- Keystone uses SQLAlchemy-migrate_ to migrate the SQL database between revisions.
For core components, the migrations are kept in a central repository under ``keystone/common/sql/migrate_repo/versions``. Each SQL migration has a version which can be identified by the name of the script; the version is the number before the underscore. For example, if the script is named ``001_add_X_table.py`` then the version of the SQL migration is ``1``. .. _SQLAlchemy-migrate: https://git.openstack.org/cgit/openstack/sqlalchemy-migrate For the migration to work, both the ``migrate_repo`` and ``versions`` subdirectories must have ``__init__.py`` files. SQLAlchemy-migrate will look for a configuration file in the ``migrate_repo`` named ``migrate.cfg``. This conforms to a key/value `ini` file format. A sample configuration file with the minimal set of values is:: [db_settings] repository_id=my_extension version_table=migrate_version required_dbs=[] To run a migration for upgrade, simply run: .. code-block:: bash $ keystone-manage db_sync .. NOTE:: If no version is specified, then the most recent migration will be used. .. NOTE:: Schema downgrades are not supported. .. _online-migration: From Mitaka release, we are starting to write the migration scripts in a backward compatible way to support `online schema migration`_. The following guidelines for schema and data migrations should be followed: * Additive schema migrations - In general, almost all schema migrations should be additive. Put simply, they should only create elements like columns, indices, and tables. * Subtractive schema migrations - To remove an element like a column or table: #. Expand phase: The element must be deprecated and retained for backward compatibility. This allows for graceful upgrade from X release to X+1. #. Migrate phase: Data migration must completely migrate data from the old version of the schema to the new version.
Data migrations should have the ability to run online, while the service is operating normally, so the keystone service implementation (typically the SQLAlchemy model) has to be aware that data should be retrieved and/or written from/to more than one place and format, to maintain consistency (see examples below). #. Contract phase: The column can then be removed with a schema migration at the start of X+2. Contract phase can't happen if the data migration isn't finished (see last point in this section). * Release notes - There should be a release note in case an operation is "blocking", "expensive", or both. You can find information on which DDL operations are expensive in `MySQL docs`_. Other supported SQL DBs support `transactional DDL`_, and experienced DBA's know to take advantage of this feature. * Constraints - When adding a foreign or unique key constraint, the schema migration code needs to handle possible problems with data before applying the constraint. For example, a unique constraint must clean up duplicate records before applying said constraint. * Data migrations - should be done in an online fashion by custom code in the SQLAlchemy layer that handles moving data between the old and new portions of the schema. In addition, for each type of data migration performed, a keystone-manage command can be added for the operator to manually request that rows be migrated (see examples below, like the nova flavor migration). * All schema migrations should be idempotent. For example, a migration should check if an element exists in the schema before attempting to add it. This logic comes for free in the autogenerated workflow of the online migrations. * Before running `contract` in the expand/migrate/contract schema migration workflow, the remaining data migrations should be performed by the contract script. Alternatively, running a relevant keystone-manage migration should be enforced, to ensure that all remaining data migrations are completed. 
It is a good practice to move data out of the old columns, and ensure they are filled with null values before removing them. A good example of an online schema migration is documented in a `cinder spec`_. See more examples in :doc:`online_schema_migration_examples`. .. _`online schema migration`: https://specs.openstack.org/openstack/keystone-specs/specs/mitaka/online-schema-migration.html .. _`MySQL docs`: https://dev.mysql.com/doc/refman/5.7/en/innodb-create-index-overview.html .. _`transactional DDL`: https://wiki.postgresql.org/wiki/Transactional_DDL_in_PostgreSQL:_A_Competitive_Analysis .. _`cinder spec`: https://specs.openstack.org/openstack/cinder-specs/specs/mitaka/online-schema-upgrades.html Initial Sample Data ------------------- There is an included script which is helpful in setting up some initial sample data for use with keystone: .. code-block:: bash $ OS_TOKEN=ADMIN tools/sample_data.sh Notice it requires a service token read from an environment variable for authentication. The default value "ADMIN" is from the ``admin_token`` option in the ``[DEFAULT]`` section in ``etc/keystone.conf``. Once run, you can see the sample data that has been created by using the `openstackclient`_ command-line interface: .. code-block:: bash $ openstack --os-token ADMIN --os-url http://127.0.0.1:35357/v2.0/ user list The `openstackclient`_ can be installed using the following: .. code-block:: bash $ pip install python-openstackclient Filtering responsibilities between controllers and drivers ---------------------------------------------------------- Keystone supports the specification of filtering on list queries as part of the v3 identity API. By default these queries are satisfied in the controller class when a controller calls the ``wrap_collection`` method at the end of a ``list_{entity}`` method. 
However, to enable optimum performance, any driver can implement some or all of the specified filters (for example, by adding filtering to the generated SQL statements to generate the list). The communication of the filter details between the controller level and its drivers is handled by the passing of a reference to a Hints object, which is a list of dicts describing the filters. A driver that satisfies a filter must delete the filter from the Hints object so that when it is returned to the controller level, it knows to only execute any unsatisfied filters. The contract for a driver for ``list_{entity}`` methods is therefore: * It MUST return a list of entities of the specified type * It MAY either just return all such entities, or alternatively reduce the list by filtering for one or more of the specified filters in the passed Hints reference, and removing any such satisfied filters. An exception to this is that for identity drivers that support domains, then they should at least support filtering by domain_id. Entity list truncation by drivers --------------------------------- Keystone supports the ability for a deployment to restrict the number of entries returned from ``list_{entity}`` methods, typically to prevent poorly formed searches (e.g. without sufficient filters) from becoming a performance issue. These limits are set in the configuration file, either for a specific driver or across all drivers. These limits are read at the Manager level and passed into individual drivers as part of the Hints list object. A driver should try and honor any such limit if possible, but if it is unable to do so then it may ignore it (and the truncation of the returned list of entities will happen at the controller level). Identity entity ID management between controllers and drivers ------------------------------------------------------------- Keystone supports the option of having domain-specific backends for the identity driver (i.e. 
for user and group storage), allowing, for example, a different LDAP server for each domain. To ensure that Keystone can determine to which backend it should route an API call, starting with Juno, the identity manager will, provided that domain-specific backends are enabled, build on-the-fly a persistent mapping table between Keystone Public IDs that are presented to the controller and the domain that holds the entity, along with whatever local ID is understood by the driver. This hides, for instance, the LDAP specifics of whatever ID is being used. To ensure backward compatibility, the default configuration of either a single SQL or LDAP backend for Identity will not use the mapping table, meaning that public facing IDs will be unchanged. If keeping these IDs the same for the default LDAP backend is not required, then setting the configuration variable ``backward_compatible_ids`` to ``False`` will enable the mapping for the default LDAP driver, hence hiding the LDAP specifics of the IDs being used. Testing ------- Running Tests ============= Before running tests, you should have ``tox`` installed and available in your environment (in addition to the other external dependencies in :doc:`devref/development.environment`): .. code-block:: bash $ pip install tox .. NOTE:: You may need to perform both the above operation and the next inside a python virtualenv, or prefix the above command with ``sudo``, depending on your preference. To execute the full suite of tests maintained within Keystone, simply run: .. code-block:: bash $ tox This iterates over multiple configuration variations, and uses external projects to do light integration testing to verify the Identity API against other projects. .. NOTE:: The first time you run ``tox``, it will take additional time to build virtualenvs. You can later use the ``-r`` option with ``tox`` to rebuild your virtualenv in a similar manner.
To run tests for one or more specific test environments (for example, the most common configuration of Python 2.7 and PEP-8), list the environments with the ``-e`` option, separated by spaces: .. code-block:: bash $ tox -e py27,pep8 See ``tox.ini`` for the full list of available test environments. Running with PDB ~~~~~~~~~~~~~~~~ Using PDB breakpoints with tox and testr normally doesn't work since the tests just fail with a BdbQuit exception rather than stopping at the breakpoint. To run with PDB breakpoints during testing, use the ``debug`` tox environment rather than ``py27``. Here's an example, passing the name of a test since you'll normally only want to run the test that hits your breakpoint: .. code-block:: bash $ tox -e debug keystone.tests.unit.test_auth.AuthWithToken.test_belongs_to For reference, the ``debug`` tox environment implements the instructions here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests Disabling Stream Capture ~~~~~~~~~~~~~~~~~~~~~~~~ The stdout, stderr and log messages generated during a test are captured and in the event of a test failure those streams will be printed to the terminal along with the traceback. The data is discarded for passing tests. Each stream has an environment variable that can be used to force captured data to be discarded even if the test fails: `OS_STDOUT_CAPTURE` for stdout, `OS_STDERR_CAPTURE` for stderr and `OS_LOG_CAPTURE` for logging. If the value of the environment variable is not one of (True, true, 1, yes) the stream will be discarded. All three variables default to 1. For example, to discard logging data during a test run: .. code-block:: bash $ OS_LOG_CAPTURE=0 tox -e py27 Test Structure ============== Not all of the tests in the keystone/tests/unit directory are strictly unit tests. Keystone intentionally includes tests that run the service locally and drives the entire configuration to achieve basic functional testing. 
For the functional tests, an in-memory key-value store or in-memory SQLite database is used to keep the tests fast. Within the tests directory, the general structure of the backend tests is a basic set of tests represented under a test class, and then subclasses of those tests under other classes with different configurations to drive different backends through the APIs. For example, ``test_backend.py`` has a sequence of tests under the class :class:`~keystone.tests.unit.test_backend.IdentityTests` that will work with the default drivers as configured in this project's etc/ directory. ``test_backend_sql.py`` subclasses those tests, changing the configuration by overriding with configuration files stored in the ``tests/unit/config_files`` directory aimed at enabling the SQL backend for the Identity module. :class:`keystone.tests.unit.test_v2_keystoneclient.ClientDrivenTestCase` uses the installed python-keystoneclient, verifying it against a temporarily running local keystone instance to explicitly verify basic functional testing across the API. Testing Schema Migrations ========================= The application of schema migrations can be tested using SQLAlchemy Migrate’s built-in test runner, one migration at a time. .. WARNING:: This may leave your database in an inconsistent state; attempt this in non-production environments only! This is useful for testing the *next* migration in sequence (both forward & backward) in a database under version control: .. code-block:: bash $ python keystone/common/sql/migrate_repo/manage.py test \ --url=sqlite:///test.db \ --repository=keystone/common/sql/migrate_repo/ This command references to a SQLite database (test.db) to be used. Depending on the migration, this command alone does not make assertions as to the integrity of your data during migration. Writing Tests ============= To add tests covering all drivers, update the base test class in ``test_backend.py``. .. 
NOTE:: The structure of backend testing is in transition, migrating from having all classes in a single file (test_backend.py) to one where there is a directory structure to reduce the size of the test files. See: - :mod:`keystone.tests.unit.backend.role` - :mod:`keystone.tests.unit.backend.domain_config` To add new drivers, subclass the ``test_backend.py`` (look towards ``test_backend_sql.py`` or ``test_backend_kvs.py`` for examples) and update the configuration of the test class in ``setUp()``. Further Testing =============== devstack_ is the *best* way to quickly deploy Keystone with the rest of the OpenStack universe and should be critical step in your development workflow! You may also be interested in either the `OpenStack Continuous Integration Infrastructure`_ or the `OpenStack Integration Testing Project`_. .. _devstack: http://docs.openstack.org/developer/devstack/ .. _OpenStack Continuous Integration Infrastructure: http://docs.openstack.org/infra/system-config .. _OpenStack Integration Testing Project: https://git.openstack.org/cgit/openstack/tempest LDAP Tests ========== LDAP has a fake backend that performs rudimentary operations. If you are building more significant LDAP functionality, you should test against a live LDAP server. Devstack has an option to set up a directory server for Keystone to use. Add ldap to the ``ENABLED_SERVICES`` environment variable, and set environment variables ``KEYSTONE_IDENTITY_BACKEND=ldap`` and ``KEYSTONE_CLEAR_LDAP=yes`` in your ``localrc`` file. The unit tests can be run against a live server with ``keystone/tests/unit/test_ldap_livetest.py`` and ``keystone/tests/unit/test_ldap_pool_livetest.py``. The default password is ``test`` but if you have installed devstack with a different LDAP password, modify the file ``keystone/tests/unit/config_files/backend_liveldap.conf`` and ``keystone/tests/unit/config_files/backend_pool_liveldap.conf`` to reflect your password. .. 
NOTE:: To run the live tests you need to set the environment variable ``ENABLE_LDAP_LIVE_TEST`` to a non-negative value. "Work in progress" Tests ======================== Work in progress (WIP) tests are very useful in a variety of situations including: * During a TDD process they can be used to add tests to a review while they are not yet working and will not cause test failures. (They should be removed before the final merge.) * Often bug reports include small snippets of code to show broken behaviors. Some of these can be converted into WIP tests that can later be worked on by a developer. This allows us to take code that can be used to catch bug regressions and commit it before any code is written. The :func:`keystone.tests.unit.utils.wip` decorator can be used to mark a test as WIP. A WIP test will always be run. If the test fails then a TestSkipped exception is raised because we expect the test to fail. We do not pass the test in this case so that it doesn't count toward the number of successfully run tests. If the test passes an AssertionError exception is raised so that the developer knows they made the test pass. This is a reminder to remove the decorator. The :func:`~keystone.tests.unit.utils.wip` decorator requires that the author provides a message. This message is important because it will tell other developers why this test is marked as a work in progress. Reviewers will require that these messages are descriptive and accurate. .. NOTE:: The :func:`~keystone.tests.unit.utils.wip` decorator is not a replacement for skipping tests. .. code-block:: python @wip('waiting on bug #000000') def test(): pass .. NOTE:: Another strategy is to not use the wip decorator and instead show how the code currently incorrectly works. Which strategy is chosen is up to the developer. 
Generating Updated Sample Config File ------------------------------------- Keystone's sample configuration file ``etc/keystone.conf.sample`` is automatically generated based upon all of the options available within Keystone. These options are sourced from the many files around Keystone as well as some external libraries. The sample configuration file is now kept up to date by an infra job that generates the config file and if there are any changes will propose a review as the OpenStack Proposal Bot. Developers should *NOT* generate the config file and propose it as part of their patches since the proposal bot will do this for you. To generate a new sample configuration to see what it looks like, run: .. code-block:: bash $ tox -egenconfig -r The tox command will place an updated sample config in ``etc/keystone.conf.sample``. If there is a new external library (e.g. ``oslo.messaging``) that utilizes the ``oslo.config`` package for configuration, it can be added to the list of libraries found in ``config-generator/keystone.conf``. Translated responses -------------------- The Keystone server can provide error responses translated into the language in the ``Accept-Language`` header of the request. In order to test this in your development environment, there's a couple of things you need to do. 1. Build the message files. Run the following command in your keystone directory: .. code-block:: bash $ python setup.py compile_catalog This will generate .mo files like keystone/locale/[lang]/LC_MESSAGES/[lang].mo 2. When running Keystone, set the ``KEYSTONE_LOCALEDIR`` environment variable to the keystone/locale directory. For example: .. code-block:: bash $ KEYSTONE_LOCALEDIR=/opt/stack/keystone/keystone/locale keystone-all Now you can get a translated error response: .. 
code-block:: bash $ curl -s -H "Accept-Language: zh" http://localhost:5000/notapath | python -mjson.tool { "error": { "code": 404, "message": "\u627e\u4e0d\u5230\u8cc7\u6e90\u3002", "title": "Not Found" } } Caching Layer ------------- The caching layer is designed to be applied to any ``manager`` object within Keystone via the use of the ``on_arguments`` decorator provided in the ``keystone.common.cache`` module. This decorator leverages the `dogpile.cache`_ caching system to provide a flexible caching backend. It is recommended that each of the managers have an independent toggle within the config file to enable caching. The easiest method to utilize the toggle within the configuration file is to define a ``caching`` boolean option within that manager's configuration section (e.g. ``identity``). Once that option is defined you can pass a function to the ``on_arguments`` decorator with the named argument ``should_cache_fn``. In the ``keystone.common.cache`` module, there is a function called ``should_cache_fn``, which will provide a reference to a function that will consult the global cache ``enabled`` option as well as the specific manager's caching enable toggle. .. NOTE:: If a section-specific boolean option is not defined in the config section specified when calling ``should_cache_fn``, the returned function reference will default to enabling caching for that ``manager``. Example use of cache and ``should_cache_fn`` (in this example, ``token`` is the manager): .. code-block:: python from keystone.common import cache SHOULD_CACHE = cache.should_cache_fn('token') @cache.on_arguments(should_cache_fn=SHOULD_CACHE) def cacheable_function(arg1, arg2, arg3): ... return some_value With the above example, each call to the ``cacheable_function`` would check to see if the arguments passed to it matched a currently valid cached item.
If the return value was cached, the caching layer would return the cached value; if the return value was not cached, the caching layer would call the function, pass the value to the ``SHOULD_CACHE`` function reference, which would then determine if caching was globally enabled and enabled for the ``token`` manager. If either caching toggle is disabled, the value is returned but not cached. It is recommended that each of the managers have an independent configurable time-to-live (TTL). If a configurable TTL has been defined for the manager configuration section, it is possible to pass it to the ``cache.on_arguments`` decorator with the named-argument ``expiration_time``. For consistency, it is recommended that this option be called ``cache_time`` and default to ``None``. If the ``expiration_time`` argument passed to the decorator is set to ``None``, the expiration time will be set to the global default (``expiration_time`` option in the ``[cache]`` configuration section). Example of using a section specific ``cache_time`` (in this example, ``identity`` is the manager): .. code-block:: python from keystone.common import cache SHOULD_CACHE = cache.should_cache_fn('identity') @cache.on_arguments(should_cache_fn=SHOULD_CACHE, expiration_time=CONF.identity.cache_time) def cachable_function(arg1, arg2, arg3): ... return some_value For cache invalidation, the ``on_arguments`` decorator will add an ``invalidate`` method (attribute) to your decorated function. To invalidate the cache, you pass the same arguments to the ``invalidate`` method as you would the normal function. Example (using the above cacheable_function): .. code-block:: python def invalidate_cache(arg1, arg2, arg3): cacheable_function.invalidate(arg1, arg2, arg3) .. WARNING:: The ``on_arguments`` decorator does not accept keyword-arguments/named arguments. An exception will be raised if keyword arguments are passed to a caching-decorated function. ..
NOTE:: In all cases methods work the same as functions except if you are attempting to invalidate the cache on a decorated bound-method, you need to pass ``self`` to the ``invalidate`` method as the first argument before the arguments. .. _`dogpile.cache`: http://dogpilecache.readthedocs.org/ dogpile.cache based Key-Value-Store (KVS) ----------------------------------------- The ``dogpile.cache`` based KVS system has been designed to allow for flexible stores for the backend of the KVS system. The implementation allows for the use of any normal ``dogpile.cache`` cache backends to be used as a store. All interfacing to the KVS system happens via the ``KeyValueStore`` object located at ``keystone.common.kvs.KeyValueStore``. To utilize the KVS system an instantiation of the ``KeyValueStore`` class is needed. To acquire a KeyValueStore instantiation use the ``keystone.common.kvs.get_key_value_store`` factory function. This factory will either create a new ``KeyValueStore`` object or retrieve the already instantiated ``KeyValueStore`` object by the name passed as an argument. The object must be configured before use. The KVS object will only be retrievable with the ``get_key_value_store`` function while there is an active reference outside of the registry. Once all references have been removed the object is gone (the registry uses a ``weakref`` to match the object to the name). Example Instantiation and Configuration: .. code-block:: python kvs_store = kvs.get_key_value_store('TestKVSRegion') kvs_store.configure('openstack.kvs.Memory', ...) Any keyword arguments passed to the configure method that are not defined as part of the KeyValueStore object configuration are passed to the backend for further configuration (e.g. memcached servers, lock_timeout, etc). The memcached backend uses the Keystone manager mechanism to support the use of any of the provided memcached backends (``bmemcached``, ``pylibmc``, and basic ``memcached``). 
By default the ``memcached`` backend is used. Currently the Memcache URLs come from the ``servers`` option in the ``[memcache]`` configuration section of the Keystone config. The following is an example showing how to configure the KVS system to use a KeyValueStore object named "TestKVSRegion" and a specific Memcached driver: .. code-block:: python kvs_store = kvs.get_key_value_store('TestKVSRegion') kvs_store.configure('openstack.kvs.Memcached', memcached_backend='Memcached') The memcached backend supports a mechanism to supply an explicit TTL (in seconds) to all keys set via the KVS object. This is accomplished by passing the argument ``memcached_expire_time`` as a keyword argument to the ``configure`` method. Passing the ``memcache_expire_time`` argument will cause the ``time`` argument to be added to all ``set`` and ``set_multi`` calls performed by the memcached client. ``memcached_expire_time`` is an argument exclusive to the memcached dogpile backend, and will be ignored if passed to another backend: .. code-block:: python kvs_store.configure('openstack.kvs.Memcached', memcached_backend='Memcached', memcached_expire_time=86400) If an explicit TTL is configured via the ``memcached_expire_time`` argument, it is possible to exempt specific keys from receiving the TTL by passing the argument ``no_expiry_keys`` (list) as a keyword argument to the ``configure`` method. ``no_expiry_keys`` should be supported by all OpenStack-specific dogpile backends (memcached) that have the ability to set an explicit TTL: .. code-block:: python kvs_store.configure('openstack.kvs.Memcached', memcached_backend='Memcached', memcached_expire_time=86400, no_expiry_keys=['key', 'second_key', ...]) .. NOTE:: For the non-expiring keys functionality to work, the backend must support the ability for the region to set the key_mangler on it and have the attribute ``raw_no_expiry_keys``. 
In most cases, support for setting the key_mangler on the backend is handled by allowing the region object to set the ``key_mangler`` attribute on the backend. The ``raw_no_expiry_keys`` attribute is expected to be used to hold the values of the keyword argument ``no_expiry_keys`` prior to hashing. It is the responsibility of the backend to use these raw values to determine if a key should be exempt from expiring and not set the TTL on the non-expiring keys when the ``set`` or ``set_multi`` methods are called. Typically the key will be hashed by the region using its key_mangler method before being passed to the backend to set the value in the KeyValueStore. This means that in most cases, the backend will need to either pre-compute the hashed versions of the keys (when the key_mangler is set) and store a cached copy, or hash each item in the ``raw_no_expiry_keys`` attribute on each call to ``.set()`` and ``.set_multi()``. The ``memcached`` backend handles this hashing and caching of the keys by utilizing an ``@property`` method for the ``.key_mangler`` attribute on the backend and utilizing the associated ``.setter`` method to front-load the hashing work at attribute set time. Once a KVS object has been instantiated the method of interacting is the same as most memcache implementations: .. code-block:: python kvs_store = kvs.get_key_value_store('TestKVSRegion') kvs_store.configure(...)
# Set a Value kvs_store.set(<Key>, <Value>) # Retrieve a value: retrieved_value = kvs_store.get(<Key>) # Delete a key/value pair: kvs_store.delete(<Key>) # multi-get: kvs_store.get_multi([<Key>, <Key2>, ...]) # multi-set: kvs_store.set_multi(dict(<Key>=<Value>, <Key2>=<Value2>, ...)) # multi-delete kvs_store.delete_multi([<Key>, <Key2>, ...]) There is a global configuration option to be aware of (that can be set in the ``[kvs]`` section of the Keystone configuration file): ``enable_key_mangler`` can be set to ``False``, disabling the use of key_manglers (modification of the key when saving to the backend to help prevent collisions or exceeding key size limits with memcached). .. NOTE:: The ``enable_key_mangler`` option in the ``[kvs]`` section of the Keystone configuration file is not the same option (and does not affect the cache-layer key manglers) as the option in the ``[cache]`` section of the configuration file. Similarly the ``[cache]`` section options relating to key manglers have no bearing on the ``[kvs]`` objects. .. WARNING:: Setting the ``enable_key_mangler`` option to False can have detrimental effects on the KeyValueStore backend. It is recommended that this value is not set to False except for debugging issues with the ``dogpile.cache`` backend itself. Any backends that are to be used with the ``KeyValueStore`` system need to be registered with dogpile. For in-tree/provided backends, the registration should occur in ``keystone/common/kvs/__init__.py``. For backends that are developed out of tree, the location should be added to the ``backends`` option in the ``[kvs]`` section of the Keystone configuration:: [kvs] backends = backend_module1.backend_class1,backend_module2.backend_class2 All registered backends will receive the "short name" of "openstack.kvs.<class name>" for use in the ``configure`` method on the ``KeyValueStore`` object. The ``<class name>`` of a backend must be globally unique.
dogpile.cache based MongoDB (NoSQL) backend -------------------------------------------- The ``dogpile.cache`` based MongoDB backend implementation allows for various MongoDB configurations, e.g., standalone, a replica set, sharded replicas, with or without SSL, use of TTL type collections, etc. Example of typical configuration for MongoDB backend: .. code-block:: python from dogpile.cache import region arguments = { 'db_hosts': 'localhost:27017', 'db_name': 'ks_cache', 'cache_collection': 'cache', 'username': 'test_user', 'password': 'test_password', # optional arguments 'son_manipulator': 'my_son_manipulator_impl' } region.make_region().configure('keystone.cache.mongo', arguments=arguments) The optional `son_manipulator` is used to manipulate custom data types while they are saved in or retrieved from MongoDB. If the dogpile cached values contain built-in data types and no custom classes, then the provided implementation class is sufficient. For further details, refer to http://api.mongodb.org/python/current/examples/custom_type.html#automatic-encoding-and-decoding Similar to other backends, this backend can be added via Keystone configuration in ``keystone.conf``:: [cache] # Global cache functionality toggle. enabled = True # Referring to specific cache backend backend = keystone.cache.mongo # Backend specific configuration arguments backend_argument = db_hosts:localhost:27017 backend_argument = db_name:ks_cache backend_argument = cache_collection:cache backend_argument = username:test_user backend_argument = password:test_password This backend is registered in the ``keystone.common.cache.core`` module. So, its usage is similar to other dogpile caching backends as it implements the same dogpile APIs.
Release Notes ------------- The release notes for a patch should be included in the patch. If not, the release notes should be in a follow-on review. If the following applies to the patch, a release note is required: * The deployer needs to take an action when upgrading * The backend driver interface changes * A new feature is implemented * Function was removed (hopefully it was deprecated) * Current behavior is changed * A new config option is added that the deployer should consider changing from the default * A security bug is fixed A release note is suggested if a long-standing or important bug is fixed. Otherwise, a release note is not required. Keystone uses `reno `_ to generate release notes. Please read the docs for details. In summary, use .. code-block:: bash $ tox -e venv -- reno new Then edit the sample file that was created and push it with your change. To see the results: .. code-block:: bash $ git commit # Commit the change because reno scans git log. $ tox -e releasenotes Then look at the generated release notes files in releasenotes/build/html in your favorite browser. keystone-9.0.0/doc/source/man/0000775000567000056710000000000012701407246017345 5ustar jenkinsjenkins00000000000000keystone-9.0.0/doc/source/man/keystone-manage.rst0000664000567000056710000001242512701407102023161 0ustar jenkinsjenkins00000000000000=============== keystone-manage =============== --------------------------- Keystone Management Utility --------------------------- :Author: openstack@lists.openstack.org :Date: 2016-4-7 :Copyright: OpenStack Foundation :Version: 9.0.0 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== keystone-manage [options] DESCRIPTION =========== ``keystone-manage`` is the command line tool which interacts with the Keystone service to initialize and update data within Keystone. Generally, ``keystone-manage`` is only used for operations that cannot be accomplished with the HTTP API, such as data import/export and database migrations. 
USAGE ===== ``keystone-manage [options] action [additional args]`` General keystone-manage options: -------------------------------- * ``--help`` : display verbose help output. Invoking ``keystone-manage`` by itself will give you some usage information. Available commands: * ``bootstrap``: Perform the basic bootstrap process. * ``db_sync``: Sync the database. * ``db_version``: Print the current migration version of the database. * ``domain_config_upload``: Upload domain configuration file. * ``fernet_rotate``: Rotate keys in the Fernet key repository. * ``fernet_setup``: Setup a Fernet key repository. * ``mapping_purge``: Purge the identity mapping table. * ``mapping_engine``: Test your federation mapping rules. * ``pki_setup``: Initialize the certificates used to sign tokens. **deprecated** * ``saml_idp_metadata``: Generate identity provider metadata. * ``ssl_setup``: Generate certificates for SSL. * ``token_flush``: Purge expired tokens. OPTIONS ======= -h, --help show this help message and exit --config-dir DIR Path to a config directory to pull \*.conf files from. This file set is sorted, so as to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous --config-file, arguments hence over-ridden options in the directory take precedence. --config-file PATH Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. The default files used are: None. --debug, -d Print debugging output (set logging level to DEBUG instead of default WARNING level). --log-config-append PATH, --log_config PATH The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. --log-date-format DATE_FORMAT Format string for %(asctime)s in log records. Default: None . 
--log-dir LOG_DIR, --logdir LOG_DIR (Optional) The base directory used for relative --log- file paths. --log-file PATH, --logfile PATH (Optional) Name of log file to output to. If no default is set, logging will go to stdout. --log-format FORMAT DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. --nodebug The inverse of --debug --nostandard-threads The inverse of --standard-threads --nouse-syslog The inverse of --use-syslog --nouse-syslog-rfc-format The inverse of --use-syslog-rfc-format --noverbose The inverse of --verbose --pydev-debug-host PYDEV_DEBUG_HOST Host to connect to for remote debugger. --pydev-debug-port PYDEV_DEBUG_PORT Port to connect to for remote debugger. --standard-threads Do not monkey-patch threading system modules. --syslog-log-facility SYSLOG_LOG_FACILITY Syslog facility to receive log lines. --use-syslog Use syslog for logging. Existing syslog format is DEPRECATED during I, and will change in J to honor RFC5424. --use-syslog-rfc-format (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in I, and will be removed in J. --verbose, -v Print more verbose output (set logging level to INFO instead of default WARNING level). 
--version show program's version number and exit FILES ===== None SEE ALSO ======== * `OpenStack Keystone `__ SOURCE ====== * Keystone is sourced in Gerrit git `Keystone `__ * Keystone bugs are managed at Launchpad `Keystone `__ keystone-9.0.0/doc/source/man/keystone-all.rst0000664000567000056710000001146612701407102022505 0ustar jenkinsjenkins00000000000000============ keystone-all ============ ------------------------ Keystone Startup Command ------------------------ :Author: openstack@lists.openstack.org :Date: 2015-10-15 :Copyright: OpenStack Foundation :Version: 8.0.0 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== :: keystone-all [-h] [--config-dir DIR] [--config-file PATH] [--debug] [--log-config-append PATH] [--log-date-format DATE_FORMAT] [--log-dir LOG_DIR] [--log-file PATH] [--log-format FORMAT] [--nodebug] [--nostandard-threads] [--nouse-syslog] [--nouse-syslog-rfc-format] [--noverbose] [--pydev-debug-host PYDEV_DEBUG_HOST] [--pydev-debug-port PYDEV_DEBUG_PORT] [--standard-threads] [--syslog-log-facility SYSLOG_LOG_FACILITY] [--use-syslog] [--use-syslog-rfc-format] [--verbose] [--version] DESCRIPTION =========== keystone-all starts both the service and administrative APIs in a single process to provide catalog, authorization, and authentication services for OpenStack. OPTIONS ======= -h, --help show this help message and exit --config-dir DIR Path to a config directory to pull \*.conf files from. This file set is sorted, so as to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous --config-file, arguments hence over-ridden options in the directory take precedence. --config-file PATH Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. The default files used are: None. --debug, -d Print debugging output (set logging level to DEBUG instead of default WARNING level). 
--log-config-append PATH, --log_config PATH The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. --log-date-format DATE_FORMAT Format string for %(asctime)s in log records. Default: None . --log-dir LOG_DIR, --logdir LOG_DIR (Optional) The base directory used for relative --log- file paths. --log-file PATH, --logfile PATH (Optional) Name of log file to output to. If no default is set, logging will go to stdout. --log-format FORMAT DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. --nodebug The inverse of --debug --nostandard-threads The inverse of --standard-threads --nouse-syslog The inverse of --use-syslog --nouse-syslog-rfc-format The inverse of --use-syslog-rfc-format --noverbose The inverse of --verbose --pydev-debug-host PYDEV_DEBUG_HOST Host to connect to for remote debugger. --pydev-debug-port PYDEV_DEBUG_PORT Port to connect to for remote debugger. --standard-threads Do not monkey-patch threading system modules. --syslog-log-facility SYSLOG_LOG_FACILITY Syslog facility to receive log lines. --use-syslog Use syslog for logging. Existing syslog format is DEPRECATED during I, and will change in J to honor RFC5424. --use-syslog-rfc-format (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in I, and will be removed in J. --verbose, -v Print more verbose output (set logging level to INFO instead of default WARNING level). 
--version show program's version number and exit FILES ===== None SEE ALSO ======== * `OpenStack Keystone `__ SOURCE ====== * Keystone source is managed in Gerrit git `Keystone `__ * Keystone bugs are managed at Launchpad `Keystone `__ keystone-9.0.0/doc/source/configuringservices.rst0000664000567000056710000002223512701407102023375 0ustar jenkinsjenkins00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================================== Configuring Services to work with Keystone ========================================== .. toctree:: :maxdepth: 1 Once Keystone is installed and running (see :doc:`configuration`), services need to be configured to work with it. To do this, we primarily install and configure middleware for the OpenStack service to handle authentication tasks or otherwise interact with Keystone. In general: * Clients making calls to the service will pass in an authentication token. * The Keystone middleware will look for and validate that token, taking the appropriate action. * It will also retrieve additional information from the token such as user name, user id, project name, project id, roles, etc... The middleware will pass those data down to the service as headers. More details on the architecture of that setup is described in the `authentication middleware documentation`_. 
Setting up credentials with ``keystone-manage bootstrap`` ========================================================= Setting up projects, users, and roles ------------------------------------- The ``keystone-manage bootstrap`` command will create a user, project and role, and will assign the newly created role to the newly created user on the newly created project. By default, the names of these new resources will be called ``admin``. The defaults may be overridden by calling ``--bootstrap-username``, ``--bootstrap-project-name`` and ``--bootstrap-role-name``. Each of these have an environment variable equivalent: ``OS_BOOTSTRAP_USERNAME``, ``OS_BOOTSTRAP_PROJECT_NAME`` and ``OS_BOOTSTRAP_ROLE_NAME``. A user password must also be supplied. This can be passed in as either ``--bootstrap-password``, or set as an environment variable using ``OS_BOOTSTRAP_PASSWORD``. Optionally, if specified by ``--bootstrap-public-url``, ``--bootstrap-admin-url`` and/or ``--bootstrap-internal-url`` or the equivalent environment variables, the command will create an identity service with the specified endpoint information. You may also configure the ``--bootstrap-region-id`` and ``--bootstrap-service-name`` for the endpoints to your deployment's requirements. .. NOTE:: It is strongly encouraged to configure the identity service and its endpoints while bootstrapping keystone. Minimally, keystone can be bootstrapped with: .. code-block:: bash $ keystone-manage bootstrap --bootstrap-password s3cr3t Verbosely, keystone can be bootstrapped with: .. 
code-block:: bash $ keystone-manage bootstrap --bootstrap-password s3cr3t --bootstrap-username admin \ --bootstrap-project-name admin \ --bootstrap-role-name admin \ --bootstrap-service-name keystone \ --bootstrap-region-id RegionOne \ --bootstrap-admin-url http://localhost:35357 \ --bootstrap-public-url http://localhost:5000 \ --bootstrap-internal-url http://localhost:5000 This will create an ``admin`` user with the ``admin`` role on the ``admin`` project. The user will have the password specified in the command. Note that both the user and the project will be created in the ``default`` domain. By not creating an endpoint in the catalog users will need to provide endpoint overrides to perform additional identity operations. By creating an ``admin`` user and an identity endpoint deployers may authenticate to keystone and perform identity operations like creating additional services and endpoints using that ``admin`` user. This will preclude the need to ever use or configure the ``admin_token`` (described below). To test a proper configuration, a user can use OpenStackClient CLI: .. code-block:: bash $ openstack project list --os-username admin --os-project-name admin \ --os-user-domain-id default --os-project-domain-id default \ --os-identity-api-version 3 --os-auth-url http://localhost:5000 \ --os-password s3cr3t Setting up credentials with Admin Token ======================================= Admin Token ----------- For a default installation of Keystone, before you can use the REST API, you need to define an authorization token. This is configured in ``keystone.conf`` file under the section ``[DEFAULT]``. In the sample file provided with the Keystone project, the line defining this token is:: [DEFAULT] admin_token = ADMIN A "shared secret" that can be used to bootstrap Keystone. This token does not represent a user, and carries no explicit authorization. 
To disable in production (highly recommended), remove AdminTokenAuthMiddleware from your paste application pipelines (for example, in keystone-paste.ini) Setting up projects, users, and roles ------------------------------------- You need to minimally define a project, user, and role to link the project and user as the most basic set of details to get other services authenticating and authorizing with Keystone. You will also want to create service users for nova, glance, swift, etc. to be able to use to authenticate users against Keystone. The ``auth_token`` middleware supports using either the shared secret described above as `admin_token` or users for each service. See :doc:`configuration` for a walk through on how to create projects, users, and roles. Setting up services =================== Creating Service Users ---------------------- To configure the OpenStack services with service users, we need to create a project for all the services, and then users for each of the services. We then assign those service users an ``admin`` role on the service project. This allows them to validate tokens - and to authenticate and authorize other user requests. Create a project for the services, typically named ``service`` (however, the name can be whatever you choose): .. code-block:: bash $ openstack project create service Create service users for ``nova``, ``glance``, ``swift``, and ``neutron`` (or whatever subset is relevant to your deployment): .. code-block:: bash $ openstack user create nova --password Sekr3tPass --project service Repeat this for each service you want to enable. Create an administrative role for the service accounts, typically named ``admin`` (however the name can be whatever you choose). For adding the administrative role to the service accounts, you'll need to know the name of the role you want to add. If you don't have it handy, you can look it up quickly with: .. 
code-block:: bash $ openstack role list Once you have it, grant the administrative role to the service users. This is all assuming that you've already created the basic roles and settings as described in :doc:`configuration`: .. code-block:: bash $ openstack role add admin --project service --user nova Defining Services ----------------- Keystone also acts as a service catalog to let other OpenStack systems know where relevant API endpoints exist for OpenStack Services. The OpenStack Dashboard, in particular, uses this heavily - and this **must** be configured for the OpenStack Dashboard to properly function. The endpoints for these services are defined in a template, an example of which is in the project as the file ``etc/default_catalog.templates``. Keystone supports two means of defining the services, one is the catalog template, as described above - in which case everything is detailed in that template. The other is a SQL backend for the catalog service, in which case after Keystone is online, you need to add the services to the catalog: .. code-block:: bash $ openstack service create compute --name nova \ --description "Nova Compute Service" $ openstack service create ec2 --name ec2 \ --description "EC2 Compatibility Layer" $ openstack service create image --name glance \ --description "Glance Image Service" $ openstack service create identity --name keystone \ --description "Keystone Identity Service" $ openstack service create object-store --name swift \ --description "Swift Service" Setting Up Auth-Token Middleware ================================ The Keystone project provides the auth-token middleware which validates that the request is valid before passing it on to the application. This must be installed and configured in the applications (such as Nova, Glance, Swift, etc.). The `authentication middleware documentation`_ describes how to install and configure this middleware. .. 
_`authentication middleware documentation`: http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html keystone-9.0.0/doc/source/configure_tokenless_x509.rst0000664000567000056710000003002612701407102024151 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ Configuring Keystone for Tokenless Authorization ================================================ .. NOTE:: This feature is experimental and unsupported in Liberty. ----------- Definitions ----------- * `X.509 Tokenless Authorization`: Provides a means to authorize client operations within Keystone by using an X.509 SSL client certificate without having to issue a token. For details, please refer to the specs `Tokenless Authorization with X.509 Client SSL Certificate`_ .. _`Tokenless Authorization with X.509 Client SSL Certificate`: http://specs.openstack.org/openstack/keystone-specs/specs/liberty/keystone-tokenless-authz-with-x509-ssl-client-cert.html Prerequisites ------------- Keystone must be running in a web container with https enabled; tests have been done with Apache/2.4.7 running on Ubuntu 14.04 . Please refer to `running-keystone-in-httpd`_ and `apache-certificate-and-key-installation`_ as references for this setup. .. _`running-keystone-in-httpd`: http://docs.openstack.org/developer/keystone/apache-httpd.html .. 
_`apache-certificate-and-key-installation`: https://www.digitalocean.com/community/tutorials/how-to-create-a-ssl-certificate-on-apache-for-ubuntu-14-04 -------------------- Apache Configuration -------------------- To enable X.509 tokenless authorization, SSL has to be enabled and configured in the Apache virtual host file. The Client authentication attribute ``SSLVerifyClient`` should be set as ``optional`` to allow other token authentication methods and attribute ``SSLOptions`` needs to set as ``+StdEnvVars`` to allow certificate attributes to be passed. The following is the sample virtual host file used for the testing. .. code-block:: ini WSGIScriptAlias / /var/www/cgi-bin/keystone/main ErrorLog /var/log/apache2/keystone.log LogLevel debug CustomLog /var/log/apache2/access.log combined SSLEngine on SSLCertificateFile /etc/apache2/ssl/apache.cer SSLCertificateKeyFile /etc/apache2/ssl/apache.key SSLCACertificatePath /etc/apache2/capath SSLOptions +StdEnvVars SSLVerifyClient optional ---------------------- Keystone Configuration ---------------------- The following options can be defined in `keystone.conf`: * ``trusted_issuer`` - The multi-str list of trusted issuers to further filter the certificates that are allowed to participate in the X.509 tokenless authorization. If the option is absent then no certificates will be allowed. The naming format for the attributes of a Distinguished Name(DN) must be separated by a comma and contain no spaces; however spaces are allowed for the value of an attribute, like 'L=San Jose' in the example below. This configuration option may be repeated for multiple values. Please look at the sample below. * ``protocol`` - The protocol name for the X.509 tokenless authorization along with the option `issuer_attribute` below can look up its corresponding mapping. It defaults to ``x509``. 
* ``issuer_attribute`` - The issuer attribute that is served as an IdP ID for the X.509 tokenless authorization along with the protocol to look up its corresponding mapping. It is the environment variable in the WSGI environment that references to the Issuer of the client certificate. It defaults to ``SSL_CLIENT_I_DN``. This is a sample configuration for two `trusted_issuer` and a `protocol` set to ``x509``. .. code-block:: ini [tokenless_auth] trusted_issuer = emailAddress=mary@abc.com,CN=mary,OU=eng,O=abc,L=San Jose,ST=California,C=US trusted_issuer = emailAddress=john@openstack.com,CN=john,OU=keystone,O=openstack,L=Sunnyvale,ST=California,C=US protocol = x509 ------------- Setup Mapping ------------- Like federation, X.509 tokenless authorization also utilizes the mapping mechanism to formulate an identity. The identity provider must correspond to the issuer of the X.509 SSL client certificate. The protocol for the given identity is ``x509`` by default, but can be configurable. Create an Identity Provider(IdP) -------------------------------- In order to create an IdP, the issuer DN in the client certificate needs to be provided. The following sample is what a generic issuer DN looks like in a certificate. .. code-block:: ini E=john@openstack.com CN=john OU=keystone O=openstack L=Sunnyvale S=California C=US The issuer DN should be constructed as a string that contains no spaces and have the right order separated by commas like the example below. Please be aware that ``emailAddress`` and ``ST`` should be used instead of ``E`` and ``S`` that are shown in the above example. The following is the sample Python code used to create the IdP ID. .. 
code-block:: python import hashlib issuer_dn = 'emailAddress=john@openstack.com,CN=john,OU=keystone, O=openstack,L=Sunnyvale,ST=California,C=US' hashed_idp = hashlib.sha256(issuer_dn) idp_id = hashed_idp.hexdigest() print(idp_id) The output of the above Python code will be the IdP ID and the following sample curl command should be sent to keystone to create an IdP with the newly generated IdP ID. .. code-block:: bash curl -k -s -X PUT -H "X-Auth-Token: " \ -H "Content-Type: application/json" \ -d '{"identity_provider": {"description": "Stores keystone IDP identities.","enabled": true}}' \ https://:/v3/OS-FEDERATION/identity_providers/ Create a Map ------------ A mapping needs to be created to map the ``Subject DN`` in the client certificate as a user to yield a valid local user if the user's ``type`` defined as ``local`` in the mapping. For example, the client certificate has ``Subject DN`` as ``CN=alex,OU=eng,O=nice-network,L=Sunnyvale, ST=California,C=US``, in the following examples, ``user_name`` will be mapped to``alex`` and ``domain_name`` will be mapped to ``nice-network``. And it has user's ``type`` set to ``local``. If user's ``type`` is not defined, it defaults to ``ephemeral``. Please refer to `mod_ssl`_ for the detailed mapping attributes. .. _`mod_ssl`: http://httpd.apache.org/docs/current/mod/mod_ssl.html .. code-block:: javascript { "mapping": { "rules": [ { "local": [ { "user": { "name": "{0}", "domain": { "name": "{1}" }, "type": "local" } } ], "remote": [ { "type": "SSL_CLIENT_S_DN_CN" }, { "type": "SSL_CLIENT_S_DN_O" } ] } ] } } When user's ``type`` is not defined or set to ``ephemeral``, the mapped user does not have to be a valid local user but the mapping must yield at least one valid local group. For example: .. 
code-block:: javascript { "mapping": { "rules": [ { "local": [ { "user": { "name": "{0}", "type": "ephemeral" } }, { "group": { "id": "12345678" } } ], "remote": [ { "type": "SSL_CLIENT_S_DN_CN" } ] } ] } } The following sample curl command should be sent to keystone to create a mapping with the provided mapping ID. The mapping ID is user designed and it can be any string as opposed to IdP ID. .. code-block:: bash curl -k -s -H "X-Auth-Token: " \ -H "Content-Type: application/json" \ -d '{"mapping": {"rules": [{"local": [{"user": {"name": "{0}","type": "ephemeral"}},{"group": {"id": ""}}],"remote": [{"type": "SSL_CLIENT_S_DN_CN"}]}]}}' \ -X PUT https://:/v3/OS-FEDERATION/mappings/ Create a Protocol ----------------- The name of the protocol will be the one defined in `keystone.conf` as ``protocol`` which defaults to ``x509``. The protocol name is user designed and it can be any name as opposed to IdP ID. A protocol name and an IdP ID will uniquely identify a mapping. The following sample curl command should be sent to keystone to create a protocol with the provided protocol name that is defined in `keystone.conf`. .. code-block:: bash curl -k -s -H "X-Auth-Token: " \ -H "Content-Type: application/json" \ -d '{"protocol": {"mapping_id": ""}}' \ -X PUT https://:/v3/OS-FEDERATION/identity_providers//protocols/ ------------------------------- Setup ``auth_token`` middleware ------------------------------- In order to use ``auth_token`` middleware as the service client for X.509 tokenless authorization, both configurable options and scope information will need to be setup. Configurable Options -------------------- The following configurable options in ``auth_token`` middleware should set to the correct values: * ``auth_protocol`` - Set to ``https``. * ``certfile`` - Set to the full path of the certificate file. * ``keyfile`` - Set to the full path of the private key file. * ``cafile`` - Set to the full path of the trusted CA certificate file. 
Scope Information ----------------- The scope information will be passed from the headers with the following header attributes to: * ``X-Project-Id`` - If specified, its the project scope. * ``X-Project-Name`` - If specified, its the project scope. * ``X-Project-Domain-Id`` - If specified, its the domain of project scope. * ``X-Project-Domain-Name`` - If specified, its the domain of project scope. * ``X-Domain-Id`` - If specified, its the domain scope. * ``X-Domain-Name`` - If specified, its the domain scope. --------------------- Test It Out with cURL --------------------- Once the above configurations have been setup, the following curl command can be used for token validation. .. code-block:: bash curl -v -k -s -X GET --cert //x509client.crt \ --key //x509client.key \ --cacert //ca.crt \ -H "X-Project-Name: " \ -H "X-Project-Domain-Id: " \ -H "X-Subject-Token: " \ https://:/v3/auth/tokens | python -mjson.tool Details of the Options ---------------------- * ``--cert`` - The client certificate that will be presented to Keystone. The ``Issuer`` in the certificate along with the defined ``protocol`` in `keystone.conf` will uniquely identify the mapping. The ``Subject`` in the certificate will be mapped to the valid local user from the identified mapping. * ``--key`` - The corresponding client private key. * ``--cacert`` - It can be the Apache server certificate or its issuer (signer) certificate. * ``X-Project-Name`` - The project scope needs to be passed in the header. * ``X-Project-Domain-Id`` - Its the domain of project scope. * ``X-Subject-Token`` - The token to be validated. keystone-9.0.0/doc/source/services.rst0000664000567000056710000002210312701407102021134 0ustar jenkinsjenkins00000000000000=========================== Keystone for other services =========================== This document provides a summary of some things that other services need to know about how keystone works, and specifically about how they can take advantage of the v3 API. 
The v3 API was introduced as a stable API in the Grizzly release and included in the default pipeline ever since. Until recently, its use has been hidden from other services because the ``auth_token`` middleware translated the token format so that both versions look the same. Once the services need to make use of v3 features they need to know about how it works. Glossary ======== Service OpenStack services like identity, compute, image, etc. Project A project provides namespace and resource isolation for groups of OpenStack entities. Users must be assigned a role on a project in order to interact with it. Prior to the introduction of the v3 API, projects were referred to as tenants and the term is still used in reference to the v2.0 API. Domains ======= A major new feature in v3 is domains. Every project, user, and user group is owned by a domain (reflected by their ``domain_id`` value) which provides them their own namespace. For example, unlike in v2.0, usernames are no longer unique across the deployment. You can have two users with the same name, but they must be in different domains. However, user IDs are assigned to users by keystone and are expected to be unique across the deployment. All of this logic applies to both projects and user groups as well. Note that roles are *not* namespaced by domains. One of the great things about domains is that you can have one domain backed by SQL (for service users) and another backed by LDAP (the cloud is deployed into existing infrastructure). The "default" domain ==================== Conventionally the "default" domain has a domain ID of ``default`` and a domain name of ``Default``. It is created by ``keystone-manage db_sync`` and thus should always exist, although the domain ID is configurable in ``keystone.conf`` and the domain name is mutable through the v3 API. Because only the v3 API is domain-aware, we must work to avoid perceived namespace conflicts to v2.0 clients. 
The solution to this is to have a single domain serve as the implied namespace for all user and tenant references in v2.0. Thus, v2.0 clients can continue to be domain-unaware and avoid the security risk posed by potential namespace conflicts. *This is the only purpose of the default domain.* For example, I could otherwise create a domain in v3, create a user in that domain called "admin", authenticate using v2.0, and a domain-unaware v2.0 client might assume I'm the same "admin" user it has seen before and grant me escalated privileges. Instead, users outside of the default domain simply cannot authenticate against v2.0, nor can such tokens with references to users and projects outside the default domain be validated on the v2.0 API. From a v2.0 client's perspective, there's no way to specify the domain, so v2.0 operations implicitly work against the default domain. So if your client is only capable of using v2.0 and you need to get a token, then you can only get tokens for users and tenants (projects) in the default domain. In the real world, this means that if your default domain is backed by SQL and you have a separate domain for LDAP users, then you can't authenticate as an LDAP user using v2.0. Conversely, if your default domain is backed by a read-only LDAP driver, then you won't be able to create the service users using v2.0 clients because any SQL-backed domain is unreachable. From a v3 client's perspective, the default domain is not special, other than the fact that such a domain can generally be assumed to exist (assuming the deployment is also running the v2.0 API). It would be reasonable for a v3 client to assume a default user domain ID of ``default`` and a default project domain ID of ``default`` unless overridden by more specific configuration. To summarize, avoiding namespace conflicts in the v2.0 API is achieved by limiting the v2.0 API and its clients to working with users and projects which are namespaced by a single, arbitrary domain in v3. 
Token differences ================= The keystone service runs both v2.0 and v3, where v2.0 requests go to the ``/v2.0`` endpoint and v3 requests go to the ``/v3`` endpoint. If you're using the default pipeline that ships with keystone, then you don't need "enable" the v3 API in keystone, as it runs by default as a parallel pipeline to the v2.0 API. If you get a token using the v2.0 API, then you can use it to do v3 operations (such as list users). The reverse, using a v3 token against v2.0, is possible only in certain circumstances. For example, if you're using a project-scoped token wherein the user and project are both owned by the "default" domain, everything will work. Otherwise, token validation against the v2.0 API will fail. You can get a v2.0 token using ``POST /v2.0/tokens``. You can get a v3 token using ``POST /v3/auth/tokens``. Note that the responses are significantly different. For example, the service catalog is in a different format, and the v3 token conveys additional context (such as the user's domain and the project's domain). Domain-scoped tokens -------------------- Domain-scoped tokens are scoped to a domain rather than a project. These are useful for operating against keystone but are generally useless in other services that don't have use cases for domain-level operations. Unless a service has a real case for handling such authorization, they don't need to concern themselves with domain-scoped tokens. Auth Token middleware ===================== The ``auth_token`` middleware handles token validation for the different services. Conceptually, what happens is that ``auth_token`` pulls the token out of the ``X-Auth-Token`` request header, validates the token using keystone, produces information about the identity (the API user) and authorization context (the project, roles, etc) of the token, and sets environment variables with that data. 
The services typically take the environment variables, put them in the service's "context", and use the context for policy enforcement via ``oslo.policy``. Service tokens -------------- Service tokens are a feature where the ``auth_token`` middleware will also accept a service token in the ``X-Service-Token`` header. It does the same thing with the service token as the user token, but the results of the token are passed separately in environment variables for the service token (the service user, project, and roles). If the service knows about these then it can put this info in its "context" and use it for policy checks. For example, assuming there's a special policy rule called ``service_role`` that works like the ``role`` rule except checks the service roles, you could have an ``oslo.policy`` rule like ``service_role:service and user_id:%(user_id)s`` such that a service token is required along with the user owning the object. v2.0 or v3? ----------- By default, the ``auth_token`` middleware will use discovery to determine the best available API to use, or can be explicitly configured to use either v2.0 or v3. When discovery is used, ``auth_token`` will use v3 if keystone reports that v3 is available. If ``auth_token`` is configured to use v2.0, then it will fail when it receives a v3 token wherein the user is not in the default domain (for example, the domain that heat creates users in). So if at all possible, the ``auth_token`` middleware should be allowed to use v3. Additionally, as other services begin to utilize features which are only found in the v3 API, you'll need to use the v3 API in order to utilize those services. For example, heat creates users in an isolated domain, and thus requires the v3 API. Do this, not that ================= Config options for authentication --------------------------------- If you need to get a token, don't define options for username and password and get a token using v2.0. 
We've got an interface for using authentication plugins where there's an option for that supports v2.0 or v3 and potentially other authentication mechanisms (X.509 client certs!). If your config file doesn't have the domain for the user, it's not going to be able to use v3 for authentication. Picking the version ------------------- Use version discovery to figure out what version the identity server supports rather than configuring the version. Use OpenStack CLI not keystone CLI ---------------------------------- The keystone CLI is deprecated and will be removed soon. The `OpenStack CLI `_ has all the keystone CLI commands and even supports v3. Hierarchical Multitenancy ========================= This feature allows maintenance of a hierarchy of projects with "parent" projects operating as domains. The token format is the same (the token doesn't contain any info about the hierarchy). If the service needs to know the hierarchy it will have to use the v3 API to fetch the hierarchy. While you can't use v2.0 to set up the hierarchy, you can get a v2.0 token scoped to a project that's part of a hierarchy. keystone-9.0.0/doc/source/conf.py0000664000567000056710000002076012701407102020065 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # keystone documentation build configuration file, created by # sphinx-quickstart on Mon Jan 9 12:02:59 2012. # # This file is execfile()d with the current directory set to its # containing dir. 
# # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import subprocess # NOTE(dstanek): adds _ to the builtins so keystone modules can be imported __builtins__['_'] = str # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'oslo_config.sphinxconfiggen', 'oslosphinx', ] config_generator_config_file = '../../config-generator/keystone.conf' sample_config_basename = '_static/keystone' todo_include_todos = True # Add any paths that contain templates here, relative to this directory. # if os.getenv('HUDSON_PUBLISH_DOCS'): # templates_path = ['_ga', '_templates'] # else: # templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'keystone' copyright = u'2012, OpenStack, LLC' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['old'] # The reST default role (used for this markup: `text`) to use for all # documents. 
#default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['keystone.'] # -- Options for man page output -------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ ('man/keystone-manage', 'keystone-manage', u'Keystone Management Utility', [u'OpenStack'], 1), ('man/keystone-all', 'keystone-all', u'Keystone Startup Command', [u'OpenStack'], 1), ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
#html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] html_last_updated_fmt = subprocess.Popen( git_cmd, stdout=subprocess.PIPE).communicate()[0] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'keystonedoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples (source # start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'keystone.tex', u'Keystone Documentation', u'OpenStack', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'keystone', u'Keystone Documentation', u'OpenStack', 'keystone', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # Example configuration for intersphinx: refer to the Python standard library. 
#intersphinx_mapping = {'http://docs.python.org/': None} keystone-9.0.0/doc/source/external-auth.rst0000664000567000056710000001477612701407102022113 0ustar jenkinsjenkins00000000000000=========================================== Using external authentication with Keystone =========================================== .. _external-auth: When Keystone is executed in a web server like :doc:`Apache HTTPD ` it is possible to use external authentication methods different from the authentication provided by the identity store backend or the different authentication plugins. For example, this makes possible to use an SQL identity backend together with, X.509 authentication or Kerberos, for example, instead of using the username and password combination. When a web server is in charge of authentication, it is normally possible to set the ``REMOTE_USER`` environment variable so that it can be used in the underlying application. Keystone can be configured to use that environment variable if set, so that the authentication is handled by the web server. Configuration ============= In Identity API v2, there is no way to disable external authentication. In order to activate the external authentication mechanism for Identity API v3, the ``external`` method must be in the list of enabled authentication methods. By default it is enabled, so if you don't want to use external authentication, remove it from the ``methods`` option in the ``auth`` section. To configure the plugin that should be used set the ``external`` option again in the ``auth`` section. There are two external authentication method plugins provided by Keystone: * ``DefaultDomain``: This plugin won't take into account the domain information that the external authentication method may pass down to Keystone and will always use the configured default domain. The ``REMOTE_USER`` variable is the username. This is the default if no plugin is given. 
* ``Domain``: This plugin expects that the ``REMOTE_DOMAIN`` variable contains the domain for the user. If this variable is not present, the configured default domain will be used. The ``REMOTE_USER`` variable is the username. Using HTTPD authentication ========================== Web servers like Apache HTTP support many methods of authentication. Keystone can profit from this feature and let the authentication be done in the web server, that will pass down the authenticated user to Keystone using the ``REMOTE_USER`` environment variable. This user must exist in advance in the identity backend to get a token from the controller. To use this method, Keystone should be running on :doc:`HTTPD `. X.509 example ------------- The following snippet for the Apache conf will authenticate the user based on a valid X.509 certificate from a known CA:: SSLEngine on SSLCertificateFile /etc/ssl/certs/ssl.cert SSLCertificateKeyFile /etc/ssl/private/ssl.key SSLCACertificatePath /etc/ssl/allowed_cas SSLCARevocationPath /etc/ssl/allowed_cas SSLUserName SSL_CLIENT_S_DN_CN SSLVerifyClient require SSLVerifyDepth 10 (...) Developing a WSGI middleware for authentication =============================================== In addition to the method described above, it is possible to implement other custom authentication mechanisms using the ``REMOTE_USER`` WSGI environment variable. .. ATTENTION:: Please note that even if it is possible to develop a custom authentication module, it is preferable to use the modules in the HTTPD server. Such authentication modules in webservers like Apache have normally undergone years of development and use in production systems and are actively maintained upstream. Developing a custom authentication module that implements the same authentication as an existing Apache module likely introduces a higher security risk. If you find you must implement a custom authentication mechanism, you will need to develop a custom WSGI middleware pipeline component. 
This middleware should set the environment variable ``REMOTE_USER`` to the authenticated username. Keystone then will assume that the user has been already authenticated upstream and will not try to authenticate it. However, as with HTTPD authentication, the user must exist in advance in the identity backend so that a proper token can be issued. Your code should set the ``REMOTE_USER`` if the user is properly authenticated, following the semantics below: .. code-block:: python from keystone.common import wsgi from keystone import exception class MyMiddlewareAuth(wsgi.Middleware): def __init__(self, *args, **kwargs): super(MyMiddlewareAuth, self).__init__(*args, **kwargs) def process_request(self, request): if request.environ.get('REMOTE_USER', None) is not None: # Assume that it is authenticated upstream return self.application if not self.is_auth_applicable(request): # Not applicable return self.application username = self.do_auth(request) if username is not None: # User is authenticated request.environ['REMOTE_USER'] = username else: # User is not authenticated, render exception raise exception.Unauthorized("Invalid user") Pipeline configuration ---------------------- Once you have your WSGI middleware component developed you have to add it to your pipeline. The first step is to add the middleware to your configuration file. Assuming that your middleware module is ``keystone.middleware.MyMiddlewareAuth``, you can configure it in your ``keystone-paste.ini`` as:: [filter:my_auth] paste.filter_factory = keystone.middleware.MyMiddlewareAuth.factory The second step is to add your middleware to the pipeline. The exact place where you should place it will depend on your code (i.e. 
if you need for example that the request body is converted from JSON before perform the authentication you should place it after the ``json_body`` filter) but it should be set before the ``public_service`` (for the ``public_api`` pipeline) or ``admin_service`` (for the ``admin_api`` pipeline), since they consume authentication. For example, if the original pipeline looks like this:: [pipeline:public_api] pipeline = url_normalize token_auth admin_token_auth json_body debug ec2_extension user_crud_extension public_service Your modified pipeline might then look like this:: [pipeline:public_api] pipeline = url_normalize token_auth admin_token_auth json_body my_auth debug ec2_extension user_crud_extension public_service keystone-9.0.0/doc/source/sample_config.rst0000664000567000056710000000076712701407102022133 0ustar jenkinsjenkins00000000000000============================== Keystone Configuration Options ============================== The following is a sample keystone configuration for adaptation and use. It is auto-generated from keystone when this documentation is built, so if you are having issues with an option, please compare your version of keystone with the version of this documentation. The sample configuration can also be viewed in `file form <_static/keystone.conf.sample>`_. .. literalinclude:: _static/keystone.conf.sample keystone-9.0.0/doc/source/api_curl_examples.rst0000664000567000056710000007353712701407102023026 0ustar jenkinsjenkins00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. ======================= API Examples using Curl ======================= -------------------------- v3 API Examples Using Curl -------------------------- Tokens ====== Default scope ------------- Get a token with default scope (may be unscoped): .. code-block:: bash curl -i \ -H "Content-Type: application/json" \ -d ' { "auth": { "identity": { "methods": ["password"], "password": { "user": { "name": "admin", "domain": { "id": "default" }, "password": "adminpwd" } } } } }' \ http://localhost:5000/v3/auth/tokens ; echo Example response:: HTTP/1.1 201 Created X-Subject-Token: MIIFvgY... Vary: X-Auth-Token Content-Type: application/json Content-Length: 1025 Date: Tue, 10 Jun 2014 20:55:16 GMT {"token": {"methods": ["password"], "roles": [{"id": "9fe2ff9ee4384b1894a90878d3e92bab", "name": "_member_"}, {"id": "c703057be878458588961ce9a0ce686b", "name": "admin"}], "expires_at": "2014-06-10T2:55:16.806001Z", "project": {"domain": {"id": "default", "name": "Default"}, "id": "8538a3f13f9541b28c2620eb19065e45", "name": "admin"}, "catalog": [{"endpoints": [{"url": "http://localhost:3537/v2.0", "region": "RegionOne", "interface": "admin", "id": "29beb2f1567642eb810b042b6719ea88"}, {"url": "http://localhost:5000/v2.0", "region": "RegionOne", "interface": "internal", "id": "8707e3735d4415c97ae231b4841eb1c"}, {"url": "http://localhost:5000/v2.0", "region": "RegionOne", "interface": "public", "id": "ef303187fc8d41668f25199c298396a5"}], "type": "identity", "id": "bd73972c0e14fb69bae8ff76e112a90", "name": "keystone"}], "extras": {}, "user": {"domain": {"id": "default", "name": "Default"}, "id": "3ec3164f750146be97f21559ee4d9c51", "name": "admin"}, "audit_ids": ["yRt0UrxJSs6-WYJgwEMMmg"], "issued_at": "201406-10T20:55:16.806027Z"}} Project-scoped -------------- Get a project-scoped token: .. 
code-block:: bash curl -i \ -H "Content-Type: application/json" \ -d ' { "auth": { "identity": { "methods": ["password"], "password": { "user": { "name": "admin", "domain": { "id": "default" }, "password": "adminpwd" } } }, "scope": { "project": { "name": "demo", "domain": { "id": "default" } } } } }' \ http://localhost:5000/v3/auth/tokens ; echo Example response:: HTTP/1.1 201 Created X-Subject-Token: MIIFfQ... Vary: X-Auth-Token Content-Type: application/json Content-Length: 960 Date: Tue, 10 Jun 2014 20:40:14 GMT {"token": {"audit_ids": ["ECwrVNWbSCqmEgPnu0YCRw"], "methods": ["password"], "roles": [{"id": "c703057be878458588961ce9a0ce686b", "name": "admin"}], "expires_at": "2014-06-10T21:40:14.360795Z", "project": {"domain": {"id": "default", "name": "Default"}, "id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "name": "demo"}, "catalog": [{"endpoints": [{"url": "http://localhost:35357/v2.0", "region": "RegionOne", "interface": "admin", "id": "29beb2f1567642eb810b042b6719ea88"}, {"url": "http://localhost:5000/v2.0", "region": "RegionOne", "interface": "internal", "id": "87057e3735d4415c97ae231b4841eb1c"}, {"url": "http://localhost:5000/v2.0", "region": "RegionOne", "interface": "public", "id": "ef303187fc8d41668f25199c298396a5"}], "type": "identity", "id": "bd7397d2c0e14fb69bae8ff76e112a90", "name": "keystone"}], "extras": {}, "user": {"domain": {"id": "default", "name": "Default"}, "id": "3ec3164f750146be97f21559ee4d9c51", "name": "admin"}, "issued_at": "2014-06-10T20:40:14.360822Z"}} Domain-Scoped ------------- Get a domain-scoped token (Note that you're going to need a role-assignment on the domain first!): .. 
code-block:: bash curl -i \ -H "Content-Type: application/json" \ -d ' { "auth": { "identity": { "methods": ["password"], "password": { "user": { "name": "admin", "domain": { "id": "default" }, "password": "adminpwd" } } }, "scope": { "domain": { "id": "default" } } } }' \ http://localhost:5000/v3/auth/tokens ; echo Example response:: HTTP/1.1 201 Created X-Subject-Token: MIIFNg... Vary: X-Auth-Token Content-Type: application/json Content-Length: 889 Date: Tue, 10 Jun 2014 20:52:59 GMT {"token": {"domain": {"id": "default", "name": "Default"}, "methods": ["password"], "roles": [{"id": "c703057be878458588961ce9a0ce686b", "name": "admin"}], "expires_at": "2014-06-10T21:52:58.852167Z", "catalog": [{"endpoints": [{"url": "http://localhost:35357/v2.0", "region": "RegionOne", "interface": "admin", "id": "29beb2f1567642eb810b042b6719ea88"}, {"url": "http://localhost:5000/v2.0", "region": "RegionOne", "interface": "internal", "id": "87057e3735d4415c97ae231b4841eb1c"}, {"url": "http://localhost:5000/v2.0", "region": "RegionOne", "interface": "public", "id": "ef303187fc8d41668f25199c298396a5"}], "type": "identity", "id": "bd7397d2c0e14fb69bae8ff76e112a90", "name": "keystone"}], "extras": {}, "user": {"domain": {"id": "default", "name": "Default"}, "id": "3ec3164f750146be97f21559ee4d9c51", "name": "admin"}, "audit_ids": ["Xpa6Uyn-T9S6mTREudUH3w"], "issued_at": "2014-06-10T20:52:58.852194Z"}} Getting a token from a token ---------------------------- Get a token from a token: .. code-block:: bash curl -i \ -H "Content-Type: application/json" \ -d ' { "auth": { "identity": { "methods": ["token"], "token": { "id": "'$OS_TOKEN'" } } } }' \ http://localhost:5000/v3/auth/tokens ; echo Example response:: HTTP/1.1 201 Created X-Subject-Token: MIIFxw... 
Vary: X-Auth-Token Content-Type: application/json Content-Length: 1034 Date: Tue, 10 Jun 2014 21:00:05 GMT {"token": {"methods": ["token", "password"], "expires_at": "2015-05-28T07:43:44.808209Z", "extras": {}, "user": {"domain": {"id": "default", "name": "Default"}, "id": "753867c25c3340ffad1abc22d488c31a", "name": "admin"}, "audit_ids": ["ZE0OPSuzTmCXHo0eIOYltw", "xxIQCkHOQOywL0oY6CTppQ"], "issued_at": "2015-05-28T07:19:23.763532Z"}} .. note:: If a scope was included in the request body then this would get a token with the new scope. DELETE /v3/auth/tokens ---------------------- Revoke a token: .. code-block:: bash curl -i -X DELETE \ -H "X-Auth-Token: $OS_TOKEN" \ -H "X-Subject-Token: $OS_TOKEN" \ http://localhost:5000/v3/auth/tokens If there's no error then the response is empty. Domains ======= GET /v3/domains --------------- List domains: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ http://localhost:5000/v3/domains | python -mjson.tool Example response: .. code-block:: javascript { "domains": [ { "description": "Owns users and tenants (i.e. projects) available on Identity API v2.", "enabled": true, "id": "default", "links": { "self": "http://identity-server:5000/v3/domains/default" }, "name": "Default" } ], "links": { "next": null, "previous": null, "self": "http://identity-server:5000/v3/domains" } } POST /v3/domains ---------------- Create a domain: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d '{ "domain": { "name": "newdomain"}}' \ http://localhost:5000/v3/domains | python -mjson.tool Example response: .. code-block:: javascript { "domain": { "enabled": true, "id": "3a5140aecd974bf08041328b53a62458", "links": { "self": "http://identity-server:5000/v3/domains/3a5140aecd974bf08041328b53a62458" }, "name": "newdomain" } } Projects ======== GET /v3/projects ---------------- List projects: .. 
code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ http://localhost:5000/v3/projects | python -mjson.tool Example response: .. code-block:: javascript { "links": { "next": null, "previous": null, "self": "http://localhost:5000/v3/projects" }, "projects": [ { "description": null, "domain_id": "default", "enabled": true, "id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "links": { "self": "http://localhost:5000/v3/projects/3d4c2c82bd5948f0bcab0cf3a7c9b48c" }, "name": "demo" } ] } PATCH /v3/projects/{id} ----------------------- Disable a project: .. code-block:: bash curl -s -X PATCH \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d ' { "project": { "enabled": false } }'\ http://localhost:5000/v3/projects/$PROJECT_ID | python -mjson.tool Example response: .. code-block:: javascript { "project": { "description": null, "domain_id": "default", "enabled": false, "extra": {}, "id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "links": { "self": "http://localhost:5000/v3/projects/3d4c2c82bd5948f0bcab0cf3a7c9b48c" }, "name": "demo" } } GET /v3/services ================ List the services: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ http://localhost:5000/v3/services | python -mjson.tool Example response: .. code-block:: javascript { "links": { "next": null, "previous": null, "self": "http://localhost:5000/v3/services" }, "services": [ { "description": "Keystone Identity Service", "enabled": true, "id": "bd7397d2c0e14fb69bae8ff76e112a90", "links": { "self": "http://localhost:5000/v3/services/bd7397d2c0e14fb69bae8ff76e112a90" }, "name": "keystone", "type": "identity" } ] } GET /v3/endpoints ================= List the endpoints: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ http://localhost:5000/v3/endpoints | python -mjson.tool Example response: .. 
code-block:: javascript { "endpoints": [ { "enabled": true, "id": "29beb2f1567642eb810b042b6719ea88", "interface": "admin", "links": { "self": "http://localhost:5000/v3/endpoints/29beb2f1567642eb810b042b6719ea88" }, "region": "RegionOne", "service_id": "bd7397d2c0e14fb69bae8ff76e112a90", "url": "http://localhost:35357/v2.0" } ], "links": { "next": null, "previous": null, "self": "http://localhost:5000/v3/endpoints" } } Users ===== GET /v3/users ------------- List users: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ http://localhost:5000/v3/users | python -mjson.tool POST /v3/users -------------- Create a user: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d '{"user": {"name": "newuser", "password": "changeme"}}' \ http://localhost:5000/v3/users | python -mjson.tool Example response: .. code-block:: javascript { "user": { "domain_id": "default", "enabled": true, "id": "ec8fc20605354edd91873f2d66bf4fc4", "links": { "self": "http://identity-server:5000/v3/users/ec8fc20605354edd91873f2d66bf4fc4" }, "name": "newuser" } } GET /v3/users/{user_id} ----------------------- Show details for a user: .. code-block:: bash USER_ID=ec8fc20605354edd91873f2d66bf4fc4 curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ http://localhost:5000/v3/users/$USER_ID | python -mjson.tool Example response: .. code-block:: javascript { "user": { "domain_id": "default", "enabled": true, "id": "ec8fc20605354edd91873f2d66bf4fc4", "links": { "self": "http://localhost:5000/v3/users/ec8fc20605354edd91873f2d66bf4fc4" }, "name": "newuser" } } POST /v3/users/{user_id}/password --------------------------------- Change password (using the default policy, this can be done as the user): .. 
code-block:: bash USER_ID=b7793000f8d84c79af4e215e9da78654 ORIG_PASS=userpwd NEW_PASS=newuserpwd curl \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d '{ "user": {"password": "'$NEW_PASS'", "original_password": "'$ORIG_PASS'"} }' \ http://localhost:5000/v3/users/$USER_ID/password .. note:: This command doesn't print anything if the request was successful. PATCH /v3/users/{user_id} ------------------------- Reset password (using the default policy, this requires admin): .. code-block:: bash USER_ID=b7793000f8d84c79af4e215e9da78654 NEW_PASS=newuserpwd curl -s -X PATCH \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d '{ "user": {"password": "'$NEW_PASS'"} }' \ http://localhost:5000/v3/users/$USER_ID | python -mjson.tool Example response: .. code-block:: javascript { "user": { "default_project_id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "domain_id": "default", "email": "demo@example.com", "enabled": true, "extra": { "email": "demo@example.com" }, "id": "269348fdd9374b8885da1418e0730af1", "links": { "self": "http://localhost:5000/v3/users/269348fdd9374b8885da1418e0730af1" }, "name": "demo" } } PUT /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} =============================================================== Create group role assignment on project: .. code-block:: bash curl -s -X PUT \ -H "X-Auth-Token: $OS_TOKEN" \ http://localhost:5000/v3/projects/$PROJECT_ID/groups/$GROUP_ID/roles/$ROLE_ID | python -mjson.tool There's no data in the response if the operation is successful. POST /v3/OS-TRUST/trusts ======================== Create a trust: .. 
code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d ' { "trust": { "expires_at": "2014-12-30T23:59:59.999999Z", "impersonation": false, "project_id": "'$PROJECT_ID'", "roles": [ { "name": "admin" } ], "trustee_user_id": "'$DEMO_USER_ID'", "trustor_user_id": "'$ADMIN_USER_ID'" }}'\ http://localhost:5000/v3/OS-TRUST/trusts | python -mjson.tool Example response: .. code-block:: javascript { "trust": { "expires_at": "2014-12-30T23:59:59.999999Z", "id": "394998fa61f14736b1f0c1f322882949", "impersonation": false, "links": { "self": "http://localhost:5000/v3/OS-TRUST/trusts/394998fa61f14736b1f0c1f322882949" }, "project_id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "remaining_uses": null, "roles": [ { "id": "c703057be878458588961ce9a0ce686b", "links": { "self": "http://localhost:5000/v3/roles/c703057be878458588961ce9a0ce686b" }, "name": "admin" } ], "roles_links": { "next": null, "previous": null, "self": "http://localhost:5000/v3/OS-TRUST/trusts/394998fa61f14736b1f0c1f322882949/roles" }, "trustee_user_id": "269348fdd9374b8885da1418e0730af1", "trustor_user_id": "3ec3164f750146be97f21559ee4d9c51" } } ------------------------------- Service API Examples Using Curl ------------------------------- The service API is defined to be a subset of the Admin API and, by default, runs on port 5000. GET / ===== This call is identical to that documented for the Admin API, except that it uses port 5000, instead of port 35357, by default: .. code-block:: bash $ curl http://0.0.0.0:5000 or: .. code-block:: bash $ curl http://0.0.0.0:5000/v2.0/ See the `Admin API Examples Using Curl`_ for more info. GET /extensions =============== This call is identical to that documented for the Admin API. POST /tokens ============ This call is identical to that documented for the Admin API. GET /tenants ============ List all of the tenants your token can access: .. 
code-block:: bash $ curl -H "X-Auth-Token:887665443383838" http://localhost:5000/v2.0/tenants Returns: .. code-block:: javascript { "tenants_links": [], "tenants": [ { "enabled": true, "description": "None", "name": "customer-x", "id": "1" } ] } ----------------------------- Admin API Examples Using Curl ----------------------------- These examples assume a default port value of 35357, and depend on the ``sampledata`` bundled with keystone. GET / ===== Discover API version information, links to documentation (PDF, HTML, WADL), and supported media types: .. code-block:: bash $ curl http://0.0.0.0:35357 .. code-block:: javascript { "versions": { "values": [ { "id": "v3.4", "links": [ { "href": "http://127.0.0.1:35357/v3/", "rel": "self" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v3+json" } ], "status": "stable", "updated": "2015-03-30T00:00:00Z" }, { "id": "v2.0", "links": [ { "href": "http://127.0.0.1:35357/v2.0/", "rel": "self" }, { "href": "http://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v2.0+json" } ], "status": "stable", "updated": "2014-04-17T00:00:00Z" } ] } } .. code-block:: bash $ curl http://0.0.0.0:35357/v2.0/ Returns: .. code-block:: javascript { "version": { "id": "v2.0", "links": [ { "href": "http://127.0.0.1:35357/v2.0/", "rel": "self" }, { "href": "http://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v2.0+json" } ], "status": "stable", "updated": "2014-04-17T00:00:00Z" } } GET /extensions =============== Discover the API extensions enabled at the endpoint: .. code-block:: bash $ curl http://localhost:35357/v2.0/extensions/ Returns: .. 
code-block:: javascript { "extensions":{ "values":[] } } POST /tokens ============ Authenticate by exchanging credentials for an access token: .. code-block:: bash $ curl -d '{"auth":{"tenantName": "customer-x", "passwordCredentials": {"username": "joeuser", "password": "secrete"}}}' -H "Content-type: application/json" http://localhost:35357/v2.0/tokens Returns: .. code-block:: javascript { "access":{ "token":{ "expires":"2012-02-05T00:00:00", "id":"887665443383838", "tenant":{ "id":"1", "name":"customer-x" } }, "serviceCatalog":[ { "endpoints":[ { "adminURL":"http://swift.admin-nets.local:8080/", "region":"RegionOne", "internalURL":"http://127.0.0.1:8080/v1/AUTH_1", "publicURL":"http://swift.publicinternets.com/v1/AUTH_1" } ], "type":"object-store", "name":"swift" }, { "endpoints":[ { "adminURL":"http://cdn.admin-nets.local/v1.1/1", "region":"RegionOne", "internalURL":"http://127.0.0.1:7777/v1.1/1", "publicURL":"http://cdn.publicinternets.com/v1.1/1" } ], "type":"object-store", "name":"cdn" } ], "user":{ "id":"1", "roles":[ { "tenantId":"1", "id":"3", "name":"Member" } ], "name":"joeuser" } } } .. note:: Take note of the value ['access']['token']['id'] value produced here (``887665443383838``, above), as you can use it in the calls below. GET /tokens/{token_id} ====================== .. note:: This call refers to a token known to be valid, ``887665443383838`` in this case. Validate a token: .. code-block:: bash $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tokens/887665443383838 If the token is valid, returns: .. 
code-block:: javascript { "access":{ "token":{ "expires":"2012-02-05T00:00:00", "id":"887665443383838", "tenant":{ "id":"1", "name":"customer-x" } }, "user":{ "name":"joeuser", "tenantName":"customer-x", "id":"1", "roles":[ { "serviceId":"1", "id":"3", "name":"Member" } ], "tenantId":"1" } } } HEAD /tokens/{token_id} ======================= This is a high-performance variant of the GET call documented above, which by definition, returns no response body: .. code-block:: bash $ curl -I -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tokens/887665443383838 ... which returns ``200``, indicating the token is valid:: HTTP/1.1 200 OK Content-Length: 0 Content-Type: None Date: Tue, 08 Nov 2011 23:07:44 GMT GET /tokens/{token_id}/endpoints ================================ List all endpoints for a token: .. code-block:: bash $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tokens/887665443383838/endpoints Returns: .. code-block:: javascript { "endpoints_links": [ { "href": "http://127.0.0.1:35357/tokens/887665443383838/endpoints?'marker=5&limit=10'", "rel": "next" } ], "endpoints": [ { "internalURL": "http://127.0.0.1:8080/v1/AUTH_1", "name": "swift", "adminURL": "http://swift.admin-nets.local:8080/", "region": "RegionOne", "tenantId": 1, "type": "object-store", "id": 1, "publicURL": "http://swift.publicinternets.com/v1/AUTH_1" }, { "internalURL": "http://localhost:8774/v1.0", "name": "nova_compat", "adminURL": "http://127.0.0.1:8774/v1.0", "region": "RegionOne", "tenantId": 1, "type": "compute", "id": 2, "publicURL": "http://nova.publicinternets.com/v1.0/" }, { "internalURL": "http://localhost:8774/v1.1", "name": "nova", "adminURL": "http://127.0.0.1:8774/v1.1", "region": "RegionOne", "tenantId": 1, "type": "compute", "id": 3, "publicURL": "http://nova.publicinternets.com/v1.1/" }, { "internalURL": "http://127.0.0.1:9292/v1.1/", "name": "glance", "adminURL": "http://nova.admin-nets.local/v1.1/", "region": "RegionOne", "tenantId": 1, "type": 
"image", "id": 4, "publicURL": "http://glance.publicinternets.com/v1.1/" }, { "internalURL": "http://127.0.0.1:7777/v1.1/1", "name": "cdn", "adminURL": "http://cdn.admin-nets.local/v1.1/1", "region": "RegionOne", "tenantId": 1, "type": "object-store", "id": 5, "publicURL": "http://cdn.publicinternets.com/v1.1/1" } ] } GET /tenants ============ List all of the tenants in the system (requires an Admin ``X-Auth-Token``): .. code-block:: bash $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tenants Returns: .. code-block:: javascript { "tenants_links": [], "tenants": [ { "enabled": false, "description": "None", "name": "project-y", "id": "3" }, { "enabled": true, "description": "None", "name": "ANOTHER:TENANT", "id": "2" }, { "enabled": true, "description": "None", "name": "customer-x", "id": "1" } ] } GET /tenants/{tenant_id} ======================== Retrieve information about a tenant, by tenant ID: .. code-block:: bash $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tenants/1 Returns: .. code-block:: javascript { "tenant":{ "enabled":true, "description":"None", "name":"customer-x", "id":"1" } } GET /tenants/{tenant_id}/users/{user_id}/roles ============================================== List the roles a user has been granted on a tenant: .. code-block:: bash $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tenants/1/users/1/roles Returns: .. code-block:: javascript { "roles_links":[], "roles":[ { "id":"3", "name":"Member" } ] } GET /users/{user_id} ==================== Retrieve information about a user, by user ID: .. code-block:: bash $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/users/1 Returns: .. code-block:: javascript { "user":{ "tenantId":"1", "enabled":true, "id":"1", "name":"joeuser" } } GET /tokens/revoked =================== Get the revocation list: .. 
code-block:: bash curl -s -H "X-Auth-Token: $OS_TOKEN" \ http://localhost:35357/v2.0/tokens/revoked | jq -r .signed | openssl cms -verify \ -certfile /etc/keystone/ssl/certs/signing_cert.pem \ -CAfile /etc/keystone/ssl/certs/ca.pem \ -inform PEM \ -nosmimecap -nodetach -nocerts -noattr 2>/dev/null | python -m json.tool Example response: .. code-block:: javascript { "revoked": [ { "expires": "2014-06-10T21:40:14Z", "id": "e6e2b5c9092751f88d2bcd30b09777a9" }, { "expires": "2014-06-10T21:47:29Z", "id": "883ef5d610bd1c68fbaa8ac528aa9f17" }, { "expires": "2014-06-10T21:51:52Z", "id": "41775ff4838f8f406b7bad28bea0dde6" } ] } keystone-9.0.0/doc/source/http-api.rst0000664000567000056710000002012612701407102021042 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======== HTTP API ======== Specifications ============== Keystone implements two major HTTP API versions, along with several API extensions that build on top of each core API. The two APIs are specified as `Identity API v2.0`_ and `Identity API v3`_. Each API is specified by a single source of truth to avoid conflicts between documentation and implementation. The original source of truth for the v2.0 API is defined by a set of WADL and XSD files. The original source of truth for the v3 API is defined by documentation. .. _`Identity API v2.0`: http://specs.openstack.org/openstack/keystone-specs/#v2-0-api .. 
_`Identity API v3`: http://specs.openstack.org/openstack/keystone-specs/#v3-api History ======= You're probably wondering why Keystone does not implement a "v1" API. As a matter of fact, one exists, but it actually predates OpenStack. The v1.x API was an extremely small API documented and implemented by Rackspace for their early public cloud products. With the advent of OpenStack, Keystone served to provide a superset of the authentication and multi-tenant authorization models already implemented by Rackspace's public cloud, Nova, and Swift. Thus, Identity API v2.0 was introduced. Identity API v3 was established to introduce namespacing for users and projects by using "domains" as a higher-level container for more flexible identity management and fixed a security issue in the v2.0 API (bearer tokens appearing in URLs). Should I use v2.0 or v3? ======================== Identity API v3. Identity API v3 is a superset of all the functionality available in v2.0 and several of its extensions, and provides a much more consistent developer experience to boot. We're also on the road to deprecating, and ultimately reducing (or dropping) support for, Identity API v2.0. How do I migrate from v2.0 to v3? ================================= I am a deployer --------------- You'll need to ensure the v3 API is included in your Paste pipeline, usually ``etc/keystone-paste.ini``. Our `latest sample configuration`_ includes the v3 application pipeline. First define a v3 application, which refers to the v3 application factory method: .. code-block:: ini [app:service_v3] use = egg:keystone#service_v3 Then define a v3 pipeline, which terminates with the v3 application you defined above: .. code-block:: ini [pipeline:api_v3] pipeline = ... service_v3 Replace "..." with whatever middleware you'd like to run in front of the API service. Our `latest sample configuration`_ documents our tested recommendations, but your requirements may vary. 
Finally, include the v3 pipeline in at least one ``composite`` application (but usually both ``[composite:main]`` and ``[composite:admin]``), for example: .. code-block:: ini [composite:main] use = egg:Paste#urlmap /v3 = api_v3 ... Once your pipeline is configured to expose both v2.0 and v3, you need to ensure that you've configured your service catalog in Keystone correctly. The simplest, and most ideal, configuration would expose one identity with unversioned endpoints (note the lack of ``/v2.0/`` or ``/v3/`` in these URLs): - Service (type: ``identity``) - Endpoint (interface: ``public``, URL: ``http://identity:5000/``) - Endpoint (interface: ``admin``, URL: ``http://identity:35357/``) If you were to perform a ``GET`` against either of these endpoints, you would be greeted by an ``HTTP/1.1 300 Multiple Choices`` response, which newer Keystone clients can use to automatically detect available API versions. .. code-block:: bash $ curl -i http://identity:35357/ HTTP/1.1 300 Multiple Choices Vary: X-Auth-Token Content-Type: application/json Content-Length: 755 Date: Tue, 10 Jun 2014 14:22:26 GMT {"versions": {"values": [ ... ]}} With unversioned ``identity`` endpoints in the service catalog, you should be able to `authenticate with keystoneclient`_ successfully. .. _`latest sample configuration`: https://git.openstack.org/cgit/openstack/keystone/tree/etc/keystone-paste.ini .. _`authenticate with keystoneclient`: http://docs.openstack.org/developer/python-keystoneclient/using-api-v3.html#authenticating I have a Python client ---------------------- The Keystone community provides first-class support for Python API consumers via our client library, `python-keystoneclient`_. If you're not currently using this library, you should, as it is intended to expose all of our HTTP API functionality. If we're missing something you're looking for, please contribute! Adopting `python-keystoneclient`_ should be the easiest way to migrate to Identity API v3. .. 
_`python-keystoneclient`: https://pypi.python.org/pypi/python-keystoneclient/ I have a non-Python client -------------------------- You'll likely need to heavily reference our `API documentation`_ to port your application to Identity API v3. .. _`API documentation`: https://git.openstack.org/cgit/openstack-attic/identity-api/tree/v3/src/markdown/identity-api-v3.md The most common operation would be password-based authentication including a tenant name (i.e. project name) to specify an authorization scope. In Identity API v2.0, this would be a request to ``POST /v2.0/tokens``: .. code-block:: javascript { "auth": { "passwordCredentials": { "password": "my-password", "username": "my-username" }, "tenantName": "project-x" } } And you would get back a JSON blob with an ``access`` -> ``token`` -> ``id`` that you could pass to another web service as your ``X-Auth-Token`` header value. In Identity API v3, an equivalent request would be to ``POST /v3/auth/tokens``: .. code-block:: javascript { "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "domain": { "id": "default" }, "name": "my-username", "password": "my-password" } } }, "scope": { "project": { "domain": { "id": "default" }, "name": "project-x" } } } } Note a few key differences when compared to the v2.0 API: - A "tenant" in v2.0 became a "project" in v3. - The authentication method (``password``) is explicitly identified. - Both the user name (``my-username``) and project name (``project-x``) are namespaced by an owning domain (where ``id`` = ``default``). The "default" domain exists by default in Keystone, and automatically owns the namespace exposed by Identity API v2.0. Alternatively, you may reference users and projects that exist outside the namespace of the default domain, which are thus inaccessible to the v2.0 API. - In v3, your token is returned to you in an ``X-Subject-Token`` header, instead of as part of the request body. 
You should still authenticate yourself to other services using the ``X-Auth-Token`` header. HTTP/1.1 Chunked Encoding ========================= .. WARNING:: Running Keystone under HTTPD in the recommended (and tested) configuration does not support the use of ``Transfer-Encoding: chunked``. This is due to a limitation with the WSGI spec and the implementation used by ``mod_wsgi``. Support for chunked encoding under ``eventlet`` may or may not continue. It is recommended that all clients assume Keystone will not support ``Transfer-Encoding: chunked``. keystone-9.0.0/doc/source/installing.rst0000664000567000056710000001027712701407102021466 0ustar jenkinsjenkins00000000000000.. Copyright 2012 OpenStack Foundation Copyright 2012 Nebula, Inc All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================== Installing Keystone =================== This document describes how to install Keystone in order to use it. If you are intending to develop on or with Keystone, please read :doc:`developing` and :doc:`devref/development.environment`. Installing from Source ---------------------- The source install instructions specifically avoid using platform specific packages, instead using the source for the code and the Python Package Index (PyPi_). .. _PyPi: http://pypi.python.org/pypi It's expected that your system already has python_, pip_, and git_ available. .. _python: http://www.python.org .. _pip: http://www.pip-installer.org/en/latest/installing.html .. 
_git: http://git-scm.com/ Clone the Keystone repository: .. code-block:: bash $ git clone https://git.openstack.org/openstack/keystone.git $ cd keystone Install the Keystone web service: .. code-block:: bash $ pip install . .. NOTE:: This step is guaranteed to fail if you do not have the proper binary dependencies already installed on your development system. Maintaining a list of platform-specific dependencies is outside the scope of this documentation, but is within scope of DEVSTACK_. You should have all the pieces you need to run Keystone installed on your system. The following commands should be available on the command-line path: * ``keystone`` the Keystone client, used to interact with Keystone * ``keystone-manage`` used to bootstrap Keystone data * ``keystone-all`` used to run the Keystone services You will find sample configuration files in ``etc/``: * ``keystone.conf`` * ``keystone-paste.ini`` * ``logging.conf`` * ``policy.json`` * ``default_catalog.templates`` From here, refer to :doc:`configuration` to choose which backend drivers to enable and use. Once configured, you should be able to run Keystone by issuing the command: .. code-block:: bash $ keystone-all By default, this will show logging on the console from which it was started. Once started, you can initialize data in Keystone for use with the rest of OpenStack, as described in :doc:`configuringservices`. An excellent reference implementation of setting up Keystone is DEVSTACK_, most commonly used for development and testing setup of not only Keystone, but all of the core OpenStack projects. .. _DEVSTACK: http://docs.openstack.org/developer/devstack/ The script with the latest examples of initializing data in Keystone is a bash script called `lib/keystone`_ .. _lib/keystone: https://git.openstack.org/cgit/openstack-dev/devstack/tree/lib/keystone Installing from packages: Ubuntu -------------------------------- To install keystone on Ubuntu: .. 
code-block:: bash $ sudo apt-get install keystone In using Ubuntu's packages, the packages will set up a user account for the Keystone service (`keystone`), and place default configurations in ``/etc/keystone``. As of this writing, the defaults for Keystone backends are all SQL based, stored locally in SQLite. Once installed, you still need to initialize data in Keystone, which you can find described in :doc:`configuringservices`. Installing from packages: Fedora -------------------------------- To install Keystone on Fedora refer to the steps found in the `OpenStack Install Guide`_. To install the packages: .. code-block:: bash $ sudo yum install openstack-keystone Once installed, you still need to initialize data in Keystone, which you can find described in :doc:`configuringservices`. .. _`OpenStack Install Guide`: http://docs.openstack.org/liberty/install-guide-rdo/keystone-install.html keystone-9.0.0/doc/source/key_terms.rst0000664000567000056710000001251612701407102021322 0ustar jenkinsjenkins00000000000000========= Key Terms ========= This document describes the different resource types that are available in OpenStack's Identity Service. Identity ======== The Identity portion of keystone includes ``Users`` and ``Groups``, and may be backed by SQL or more commonly LDAP. Users ----- ``Users`` represent an individual API consumer. A user itself must be owned by a specific domain, and hence all user names are **not** globally unique, but only unique to their domain. Groups ------ ``Groups`` are a container representing a collection of users. A group itself must be owned by a specific domain, and hence all group names are **not** globally unique, but only unique to their domain. Resources ========= The Resources portion of keystone includes ``Projects`` and ``Domains``, and are commonly stored in an SQL backend. 
Projects (Tenants) ------------------ ``Projects`` (known as Tenants in v2.0) represent the base unit of ``ownership`` in OpenStack, in that all resources in OpenStack should be owned by a specific project. A project itself must be owned by a specific domain, and hence all project names are **not** globally unique, but unique to their domain. If the domain for a project is not specified, then it is added to the default domain. Domains ------- ``Domains`` are a high-level container for projects, users and groups. Each is owned by exactly one domain. Each domain defines a namespace where an API-visible name attribute exists. keystone provides a default domain, aptly named 'Default'. In the Identity v3 API, the uniqueness of attributes is as follows: - Domain Name. Globally unique across all domains. - Role Name. Globally unique across all domains. - User Name. Unique within the owning domain. - Project Name. Unique within the owning domain. - Group Name. Unique within the owning domain. Due to their container architecture, domains may be used as a way to delegate management of OpenStack resources. A user in a domain may still access resources in another domain, if an appropriate assignment is granted. Assignment ========== Roles ----- ``Roles`` dictate the level of authorization the end user can obtain. Roles can be granted at either the domain or project level. Roles can be assigned to an individual user or at the group level. Role names are globally unique. Role Assignments ---------------- A 3-tuple that has a ``Role``, a ``Resource`` and an ``Identity``. What's needed to Authenticate? ============================== Two pieces of information are required to authenticate with keystone, a bit of ``Resource`` information and a bit of ``Identity``. Take the following call POST data for instance: .. 
code-block:: javascript { "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "0ca8f6", "password": "secretsecret" } } }, "scope": { "project": { "id": "263fd9" } } } } The user (ID of 0ca8f6) is attempting to retrieve a token that is scoped to project (ID of 263fd9). To perform the same call with names instead of IDs, we now need to supply information about the domain. This is because usernames are only unique within a given domain, but user IDs are supposed to be unique across the deployment. Thus, the auth request looks like the following: .. code-block:: javascript { "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "domain": { "name": "acme" }, "name": "userA", "password": "secretsecret" } } }, "scope": { "project": { "domain": { "id": "1789d1" }, "name": "project-x" } } } } For both the user and the project portion, we must supply either a domain ID or a domain name, in order to properly determine the correct user and project. Alternatively, if we wanted to represent this as environment variables for a command line, it would be: .. code-block:: bash $ export OS_PROJECT_DOMAIN_ID=1789d1 $ export OS_USER_DOMAIN_NAME=acme $ export OS_USERNAME=userA $ export OS_PASSWORD=secretsecret $ export OS_PROJECT_NAME=project-x Note that the project the user is attempting to access must be in the same domain as the user. What is Scope? ============== Scope is an overloaded term. In reference to authenticating, as seen above, scope refers to the portion of the POST data that dictates what ``Resource`` (project or domain) the user wants to access. In reference to tokens, scope refers to the effectiveness of a token, i.e.: a `project-scoped` token is only useful on the project it was initially granted for. A `domain-scoped` token may be used to perform domain-related functions. In reference to users, groups, and projects, scope often refers to the domain that the entity is owned by. 
i.e.: a user in domain X is scoped to domain X. keystone-9.0.0/doc/source/mapping_schema.rst0000664000567000056710000001205212701407102022266 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================= Mapping Schema for Federation ============================= Description ----------- The schema for mapping is a description of how a mapping should be created. It shows all the requirements and possibilities for a JSON to be used for mapping. Mapping schema is validated with `JSON Schema `__ Mapping Schema -------------- The rules supported must use the following schema: .. 
code-block:: javascript { "type": "object", "required": ['rules'], "properties": { "rules": { "minItems": 1, "type": "array", "items": { "type": "object", "required": ['local', 'remote'], "additionalProperties": False, "properties": { "local": { "type": "array" }, "remote": { "minItems": 1, "type": "array", "items": { "type": "object", "oneOf": [ {"$ref": "#/definitions/empty"}, {"$ref": "#/definitions/any_one_of"}, {"$ref": "#/definitions/not_any_of"}, {"$ref": "#/definitions/blacklist"}, {"$ref": "#/definitions/whitelist"} ], } } } } } }, "definitions": { "empty": { "type": "object", "required": ['type'], "properties": { "type": { "type": "string" }, }, "additionalProperties": False, }, "any_one_of": { "type": "object", "additionalProperties": False, "required": ['type', 'any_one_of'], "properties": { "type": { "type": "string" }, "any_one_of": { "type": "array" }, "regex": { "type": "boolean" } } }, "not_any_of": { "type": "object", "additionalProperties": False, "required": ['type', 'not_any_of'], "properties": { "type": { "type": "string" }, "not_any_of": { "type": "array" }, "regex": { "type": "boolean" } } }, "blacklist": { "type": "object", "additionalProperties": False, "required": ['type', 'blacklist'], "properties": { "type": { "type": "string" }, "blacklist": { "type": "array" } } }, "whitelist": { "type": "object", "additionalProperties": False, "required": ['type', 'whitelist'], "properties": { "type": { "type": "string" }, "whitelist": { "type": "array" } } } } } .. NOTE:: ``"additionalProperties": False``, shows that only the properties shown can be displayed. .. code-block:: javascript "whitelist": { "type": "object", "additionalProperties": False, "required": ['type', 'whitelist'], "properties": { "type": { "type": "string" }, "whitelist": { "type": "array" } } } Keystone will not accept any other keys in the JSON mapping other than ``type``, and ``whitelist``. 
keystone-9.0.0/doc/source/extensions.rst0000664000567000056710000000315312701407102021514 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========== Extensions ========== Status ====== An extension may be considered ``stable``, ``experimental`` or ``out-of-tree``. * A `stable` status indicates that an extension is fully supported by the OpenStack Identity team. * An `experimental` status indicates that although the intention is to keep the API unchanged, we reserve the right to change it up until the point that it is deemed `stable`. * An `out-of-tree` status indicates that no formal support will be provided. Graduation Process ================== By default, major new functionality that is proposed to be in-tree will start off in `experimental` status. Typically it would take a minimum of one cycle to transition from `experimental` to `stable`, although in special cases this might happen within a cycle. Removal Process =============== It is not intended that functionality should stay in experimental for a long period, functionality that stays `experimental` for more than **two** releases would be expected to make a transition to either `stable` or `out-of-tree`. keystone-9.0.0/doc/source/developing_drivers.rst0000664000567000056710000001276112701407102023214 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =========================== Developing Keystone Drivers =========================== A driver, also known as a backend, is an important architectural component of Keystone. It is an abstraction around the data access needed by a particular subsystem. This pluggable implementation is not only how Keystone implements its own data access, but how you can implement your own! Each major subsystem (that has data access needs) implements the data access by using drivers. Some examples of Keystone's drivers: - :class:`keystone.identity.backends.ldap.Identity` - :class:`keystone.token.providers.fernet.core.Provider` - :class:`keystone.contrib.federation.backends.sql.Federation` In/Out of Tree -------------- It's best to start developing your custom driver outside of the Keystone development process. This means developing it in your own public or private git repository and not worrying about getting it upstream (for now). This is better for you because it gives you more freedom and you are not bound to the strict OpenStack development rules or schedule. You can iterate faster and take whatever shortcuts you need to get your product out of the door. This is also good for Keystone because it will limit the amount of drivers that must be maintained by the team. If the team had to maintain a driver for each NoSQL DB that deployers want to use in production there would be less time to make Keystone itself better. Not to mention that the team would have to start gaining expertise in potentially dozens of new technologies. 
As you'll see below there is no penalty for open sourcing your driver, on GitHub for example, or even keeping your implementation private. We use `Setuptools entry points`_ to load your driver from anywhere in the Python path. .. _Setuptools entry points: no good resource? How To Make a Driver -------------------- The TLDR; steps (and too long didn't write yet): 1. Determine which subsystem you would like write a driver for 2. Subclass the most current version of the driver interface 3. Implement each of the abstract methods for that driver a. We are currently not documenting the exact input/outputs of the driver methods. The best approach right now is to use an existing driver as an example of what data your driver will receive and what data your driver will be required to return. b. There is a plan in place to document these APIs in more detail. 4. Register your new driver as an entry point 5. Configure your new driver in ``keystone.conf`` 6. Sit back and enjoy! Driver Versioning ----------------- In the past the driver class was named ``Driver`` and changes would sometimes be devastating to developers that depend on our driver contracts. To help alleviate some of the issues we are now creating version driver classes, e.g. ``DriverV8``. We'll be supporting the current driver version for at least one version back. This gives developers a full cycle to update their drivers. Some cases, such as critical security flaws, may require a change to be introduced that breaks compatibility. These special cases will be communicated as widely as possible via the typical OpenStack communication channels. As new driver interface versions are added old ones will be moved to a "deprecated" state and will output deprecation messages when used. When a driver version moves from "deprecated" to "unsupported" it will be removed from the keystone source tree. Removing Methods ~~~~~~~~~~~~~~~~ Newer driver interfaces may remove methods that are currently required. 
Methods are removed when they are no longer required or invoked by Keystone. There is no reason why methods removed from the Keystone interface need to be removed from custom drivers. Adding Methods -------------- The most common API changes will be adding method to support new features. We'll do our best to add methods in a way that is backward compatible. The new version of the driver will define the new method as an ``abc.abstractmethod`` that must be implemented by driver implementations. When possible we'll also go back to our supported drivers and add the method, with a default implementation. For example, given a ``thing.DriverV8`` that added a new method ``list_things_by_name()``, we will go back to ``thing.DriverV7`` and implement that method. This is good because in many cases your driver will just work, but there are a couple of unfortunate side effects. First if you have already used that method name you will have to rename your method and cut a new version. Second is that the default implementation may cause a performance penalty due to its naive implementation. Updating Methods ~~~~~~~~~~~~~~~~ We will try not to update existing methods in ways that will break old driver implementations. That means that: * We will respect existing parameters and not just delete them. If they are to be removed we will respect their behavior and deprecate them in older versions. * We will add new parameters as optional with backward compatible defaults. keystone-9.0.0/doc/source/mapping_combinations.rst0000664000567000056710000004454312701407102023525 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================================== Mapping Combinations for Federation =================================== ----------- Description ----------- Mapping adds a set of rules to map federation attributes to Keystone users and/or groups. An Identity Provider has exactly one mapping specified per protocol. Mapping objects can be used multiple times by different combinations of Identity Provider and Protocol. ----------- Definitions ----------- A rule hierarchy looks as follows: .. code-block:: javascript { "rules": [ { "local": [ { " or " } ], "remote": [ { "" } ] } ] } * `rules`: top-level list of rules. * `local`: a rule containing information on what local attributes will be mapped. * `remote`: a rule containing information on what remote attributes will be mapped. * ``: contains information on conditions that allow a rule, can only be set in a `remote` rule. ------------- Mapping Rules ------------- Mapping Engine -------------- The mapping engine can be tested before creating a federated setup. It can be tested with the ``keystone-manage mapping_engine`` command: .. code-block:: bash $ keystone-manage mapping_engine --rules --input Mapping Conditions ------------------ Mappings support 5 different types of conditions: ``empty``: The rule is matched to all claims containing the remote attribute type. This condition does not need to be specified. ``any_one_of``: The rule is matched only if any of the specified strings appear in the remote attribute type. Condition result is boolean, not the argument that is passed as input. 
``not_any_of``: The rule is not matched if any of the specified strings appear in the remote attribute type. Condition result is boolean, not the argument that is passed as input. ``blacklist``: The rule allows all except a specified set of groups. Condition result is the argument(s) passed as input minus what was matched in the blacklist. ``whitelist``: The rules allows a specified set of groups. Condition result is the argument(s) passed as input and is/are also present in the whitelist. .. NOTE:: ``empty``, ``blacklist`` and ``whitelist`` are the only conditions that can be used in direct mapping ({0}, {1}, etc.) You can combine multiple conditions in a single rule. The schema that needs to be followed for the mapping rules can be seen in the :doc:`mapping_schema` page. Mappings Examples ----------------- The following are all examples of mapping rule types. empty condition ~~~~~~~~~~~~~~~ .. code-block:: javascript { "rules": [ { "local": [ { "user": { "name": "{0} {1}", "email": "{2}" }, "group": { "name": "{3}" } } ], "remote": [ { "type": "FirstName" }, { "type": "LastName" }, { "type": "Email" }, { "type": "OIDC_GROUPS" } ] } ] } .. NOTE:: The numbers in braces {} are indices, they map in order. For example:: - Mapping to user with the name matching the value in remote attribute FirstName - Mapping to user with the name matching the value in remote attribute LastName - Mapping to user with the email matching value in remote attribute Email - Mapping to a group(s) with the name matching the value(s) in remote attribute OIDC_GROUPS Groups can have multiple values. Each value must be separated by a `;` Example: OIDC_GROUPS=developers;testers other conditions ~~~~~~~~~~~~~~~~ In ```` shown below, please supply one of the following: ``any_one_of``, or ``not_any_of``. .. 
code-block:: javascript { "rules": [ { "local": [ { "user": { "name": "{0}" }, "group": { "id": "0cd5e9" } } ], "remote": [ { "type": "UserName" }, { "type": "HTTP_OIDC_GROUPIDS", "": [ "HTTP_OIDC_EMAIL" ] } ] } ] } In ```` shown below, please supply one of the following: ``blacklist``, or ``whitelist``. .. code-block:: javascript { "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "groups": "{1}", "domain": { "id": "0cd5e9" } } ], "remote": [ { "type": "UserName" }, { "type": "HTTP_OIDC_GROUPIDS", "": [ "me@example.com" ] } ] } ] } .. NOTE:: If the user id and name are not specified in the mapping, the server tries to directly map ``REMOTE_USER`` environment variable. If this variable is also unavailable the server returns an HTTP 401 Unauthorized error. Group ids and names can be provided in the local section: .. code-block:: javascript { "local": [ { "group": { "id":"0cd5e9" } } ] } .. code-block:: javascript { "local": [ { "group": { "name": "developer_group", "domain": { "id": "abc1234" } } } ] } .. code-block:: javascript { "local": [ { "group": { "name": "developer_group", "domain": { "name": "private_cloud" } } } ] } Output ------ If a mapping is valid you will receive the following output: .. code-block:: javascript { "group_ids": "[]", "user": { "domain": { "id": "Federated" or "" }, "type": "ephemeral" or "local", "name": "", "id": "" }, "group_names": [ { "domain": { "name": "" }, "name": { "name": "[]" } } { "domain": { "name": "" }, "name": { "name": "[]" } } ] } The ``type`` parameter specifies the type of user being mapped. The 2 possible user types are ``local`` and ``ephemeral``.``local`` is displayed if the user has a domain specified. The user is treated as existing in the backend, hence the server will fetch user details (id, name, roles, groups).``ephemeral`` is displayed for a user that does not exist in the backend. The ``id`` parameter in the service domain specifies the domain a user belongs to. 
``Federated`` will be displayed if no domain is specified in the local rule. User is deemed ephemeral and becomes a member of service domain named ``Federated``. If the domain is specified the local domain's id will be displayed. If the mapped user is local, mapping engine will discard further group assigning and return set of roles configured for the user. .. NOTE:: Domain ``Federated`` is a service domain - it cannot be listed, displayed, added or deleted. There is no need to perform any operation on it prior to federation configuration. Regular Expressions ------------------- Regular expressions can be used in a mapping by specifying the ``regex`` key, and setting it to ``true``. .. code-block:: javascript { "rules": [ { "local": [ { "user": { "name": "{0}" }, "group": { "id": "0cd5e9" } }, ], "remote": [ { "type": "UserName" }, { "type": "HTTP_OIDC_GROUPIDS", "any_one_of": [ ".*@yeah.com$" ] "regex": true } ] } ] } This allows any user with a claim containing a key with any value in ``HTTP_OIDC_GROUPIDS`` to be mapped to group with id ``0cd5e9``. Condition Combinations ---------------------- Combinations of mappings conditions can also be done. ``empty``, ``any_one_of``, and ``not_any_of`` can all be used in the same rule, but cannot be repeated within the same condition. ``any_one_of`` and ``not_any_of`` are mutually exclusive within a condition's scope. So are ``whitelist`` and ``blacklist``. .. code-block:: javascript { "rules": [ { "local": [ { "user": { "name": "{0}" }, "group": { "id": "0cd5e9" } }, ], "remote": [ { "type": "UserName" }, { "type": "cn=IBM_Canada_Lab", "not_any_of": [ ".*@naww.com$" ], "regex": true }, { "type": "cn=IBM_USA_Lab", "any_one_of": [ ".*@yeah.com$" ] "regex": true } ] } ] } As before group names and users can also be provided in the local section. This allows any user with the following claim information to be mapped to group with id 0cd5e9. .. 
code-block:: javascript {"UserName":"@yeah.com"} {"cn=IBM_USA_Lab":"@yeah.com"} {"cn=IBM_Canada_Lab":"@yeah.com"} The following claims will be mapped: - any claim containing the key UserName. - any claim containing key cn=IBM_Canada_Lab that doesn't have the value @naww.com. - any claim containing key cn=IBM_USA_Lab that has value @yeah.com. Multiple Rules -------------- Multiple rules can also be utilized in a mapping. .. code-block:: javascript { "rules": [ { "local": [ { "user": { "name": "{0}" }, "group": { "name": "non-contractors", "domain": { "id": "abc1234" } } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "not_any_of": [ "Contractor", "SubContractor" ] } ] }, { "local": [ { "user": { "name": "{0}" }, "group": { "name": "contractors", "domain": { "id": "abc1234" } } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "any_one_of": [ "Contractor", "SubContractor" ] } ] } ] } The above assigns groups membership basing on ``orgPersonType`` values: - neither ``Contractor`` nor ``SubContractor`` will belong to the ``non-contractors`` group. - either ``Contractor or ``SubContractor`` will belong to the ``contractors`` group. Rules are additive, so permissions will only be granted for the rules that succeed. All the remote conditions of a rule must be valid. When using multiple rules you can specify more than one effective user identification, but only the first match will be considered and the others ignored ordered from top to bottom. Since rules are additive one can specify one user identification and this will also work. The best practice for multiple rules is to create a rule for just user and another rule for just groups. Below is rules example repeated but with global username mapping. .. 
code-block:: javascript { "rules": [ { "local": [ "user": { "id": "{0}" } ], "remote": [ { "type": "UserType" } ] }, { "local": [ { "group": { "name": "non-contractors", "domain": { "id": "abc1234" } } } ], "remote": [ { "type": "orgPersonType", "not_any_of": [ "Contractor", "SubContractor" ] } ] }, { "local": [ { "group": { "name": "contractors", "domain": { "id": "abc1234" } } } ], "remote": [ { "type": "orgPersonType", "any_one_of": [ "Contractor", "SubContractor" ] } ] } ] } Keystone to Keystone -------------------- Keystone to Keystone federation also utilizes mappings, but has some differences. An attribute file (``/etc/shibboleth/attribute-map.xml``) is used to add attributes to the Keystone Identity Provider. Attributes look as follows: .. code-block:: xml The Keystone Service Provider must contain a mapping as shown below. ``openstack_user``, and ``openstack_user_domain`` match to the attribute names we have in the Identity Provider. It will map any user with the name ``user1`` or ``admin`` in the ``openstack_user`` attribute and ``openstack_domain`` attribute ``default`` to a group with id ``abc1234``. .. code-block:: javascript { rules = [ { "local": [ { "group": { "id": "abc1234" } } ], "remote": [ { "type": "openstack_user", "any_one_of": [ "user1", "admin" ] }, { "type":"openstack_user_domain", "any_one_of": [ "Default" ] } ] } ] } The possible attributes that can be used in a mapping are `openstack_user`, `openstack_user_domain`, `openstack_roles`, `openstack_project`, and `openstack_project_domain`. keystone-9.0.0/doc/source/architecture.rst0000664000567000056710000002506012701407102022000 0ustar jenkinsjenkins00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Keystone Architecture ===================== Much of the design is precipitated from the expectation that the auth backends for most deployments will actually be shims in front of existing user systems. ------------ The Services ------------ Keystone is organized as a group of internal services exposed on one or many endpoints. Many of these services are used in a combined fashion by the frontend, for example an authenticate call will validate user/project credentials with the Identity service and, upon success, create and return a token with the Token service. Identity -------- The Identity service provides auth credential validation and data about Users, Groups. In the basic case all this data is managed by the service, allowing the service to manage all the CRUD associated with the data. In other cases from an authoritative backend service. An example of this would be when backending on LDAP. See `LDAP Backend` below for more details. Resource -------- The Resource service provides data about Projects and Domains. Like the Identity service, this data may either be managed directly by the service or be pulled from another authoritative backend service, such as LDAP. Assignment ---------- The Assignment service provides data about Roles and Role assignments to the entities managed by the Identity and Resource services. Again, like these two services, this data may either be managed directly by the Assignment service or be pulled from another authoritative backend service, such as LDAP. 
Token ----- The Token service validates and manages Tokens used for authenticating requests once a user's credentials have already been verified. Catalog ------- The Catalog service provides an endpoint registry used for endpoint discovery. Policy ------ The Policy service provides a rule-based authorization engine and the associated rule management interface. ------------------------ Application Construction ------------------------ Keystone is an HTTP front-end to several services. Like other OpenStack applications, this is done using python WSGI interfaces and applications are configured together using Paste_. The application's HTTP endpoints are made up of pipelines of WSGI middleware, such as: .. code-block:: ini [pipeline:api_v3] pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension service_v3 These in turn use a subclass of :mod:`keystone.common.wsgi.ComposingRouter` to link URLs to Controllers (a subclass of :mod:`keystone.common.wsgi.Application`). Within each Controller, one or more Managers are loaded (for example, see :mod:`keystone.catalog.core.Manager`), which are thin wrapper classes which load the appropriate service driver based on the Keystone configuration. 
* Assignment * :mod:`keystone.assignment.controllers.GrantAssignmentV3` * :mod:`keystone.assignment.controllers.ProjectAssignmentV3` * :mod:`keystone.assignment.controllers.TenantAssignment` * :mod:`keystone.assignment.controllers.Role` * :mod:`keystone.assignment.controllers.RoleAssignmentV2` * :mod:`keystone.assignment.controllers.RoleAssignmentV3` * :mod:`keystone.assignment.controllers.RoleV3` * Authentication * :mod:`keystone.auth.controllers.Auth` * Catalog * :mod:`keystone.catalog.controllers.EndpointV3` * :mod:`keystone.catalog.controllers.RegionV3` * :mod:`keystone.catalog.controllers.ServiceV3` * Identity * :mod:`keystone.identity.controllers.GroupV3` * :mod:`keystone.identity.controllers.UserV3` * Policy * :mod:`keystone.policy.controllers.PolicyV3` * Resource * :mod:`keystone.resource.controllers.DomainV3` * :mod:`keystone.resource.controllers.ProjectV3` * Token * :mod:`keystone.token.controllers.Auth` .. _Paste: http://pythonpaste.org/ ---------------- Service Backends ---------------- Each of the services can be configured to use a backend to allow Keystone to fit a variety of environments and needs. The backend for each service is defined in the keystone.conf file with the key ``driver`` under a group associated with each service. A general class exists under each backend to provide an abstract base class for any implementations, identifying the expected service implementations. The classes are named after the keystone release in which they were introduced. For eg. ``DriverV8`` for keystone release version 8. 
The corresponding drivers for the services are: * :mod:`keystone.assignment.core.AssignmentDriverV8` * :mod:`keystone.assignment.core.RoleDriverV8` * :mod:`keystone.catalog.core.CatalogDriverV8` * :mod:`keystone.credential.core.CredentialDriverV8` * :mod:`keystone.endpoint_policy.core.EndpointPolicyDriverV8` * :mod:`keystone.federation.core.FederationDriverV8` * :mod:`keystone.identity.core.IdentityDriverV8` * :mod:`keystone.identity.core.MappingDriverV8` * :mod:`keystone.oauth1.core.Oauth1DriverV8` * :mod:`keystone.policy.core.PolicyDriverV8` * :mod:`keystone.resource.core.DomainConfigDriverV8` * :mod:`keystone.resource.core.ResourceDriverV8` * :mod:`keystone.revoke.core.RevokeDriverV8` * :mod:`keystone.token.core.TokenDriverV8` * :mod:`keystone.trust.core.TrustDriverV8` If you implement a backend driver for one of the Keystone services, you're expected to subclass from these classes. SQL Backend ----------- A SQL based backend using SQLAlchemy to store data persistently. The ``keystone-manage`` command introspects the backends to identify SQL based backends when running "db_sync" to establish or upgrade schema. If the backend driver has a method db_sync(), it will be invoked to sync and/or migrate schema. Templated Backend ----------------- Largely designed for a common use case around service catalogs in the Keystone project, a Catalog backend that simply expands pre-configured templates to provide catalog data. Example paste.deploy config (uses $ instead of % to avoid ConfigParser's interpolation):: [DEFAULT] catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0 catalog.RegionOne.identity.adminURL = http://localhost:$(public_port)s/v2.0 catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0 catalog.RegionOne.identity.name = 'Identity Service' LDAP Backend ------------ The LDAP backend stores Users and Projects in separate Subtrees. Roles are recorded as entries under the Projects. 
---------- Data Model ---------- Keystone was designed from the ground up to be amenable to multiple styles of backends and as such many of the methods and data types will happily accept more data than they know what to do with and pass them on to a backend. There are a few main data types: * **User**: has account credentials, is associated with one or more projects or domains * **Group**: a collection of users, is associated with one or more projects or domains * **Project**: unit of ownership in OpenStack, contains one or more users * **Domain**: unit of ownership in OpenStack, contains users, groups and projects * **Role**: a first-class piece of metadata associated with many user-project pairs. * **Token**: identifying credential associated with a user or user and project * **Extras**: bucket of key-value metadata associated with a user-project pair. * **Rule**: describes a set of requirements for performing an action. While the general data model allows a many-to-many relationship between Users and Groups to Projects and Domains; the actual backend implementations take varying levels of advantage of that functionality. ---------------- Approach to CRUD ---------------- While it is expected that any "real" deployment at a large company will manage their users, groups, projects and domains in their existing user systems, a variety of CRUD operations are provided for the sake of development and testing. CRUD is treated as an extension or additional feature to the core feature set in that it is not required that a backend support it. It is expected that backends for services that don't support the CRUD operations will raise a :mod:`keystone.exception.NotImplemented`. ---------------------------------- Approach to Authorization (Policy) ---------------------------------- Various components in the system require that different actions are allowed based on whether the user is authorized to perform that action. 
For the purposes of Keystone there are only a couple levels of authorization being checked for: * Require that the performing user is considered an admin. * Require that the performing user matches the user being referenced. Other systems wishing to use the policy engine will require additional styles of checks and will possibly write completely custom backends. By default, Keystone leverages Policy enforcement that is maintained in Oslo-Incubator, found in `keystone/openstack/common/policy.py`. Rules ----- Given a list of matches to check for, simply verify that the credentials contain the matches. For example: .. code-block:: python credentials = {'user_id': 'foo', 'is_admin': 1, 'roles': ['nova:netadmin']} # An admin only call: policy_api.enforce(('is_admin:1',), credentials) # An admin or owner call: policy_api.enforce(('is_admin:1', 'user_id:foo'), credentials) # A netadmin call: policy_api.enforce(('roles:nova:netadmin',), credentials) Credentials are generally built from the user metadata in the 'extras' part of the Identity API. So, adding a 'role' to the user just means adding the role to the user metadata. Capability RBAC --------------- (Not yet implemented.) Another approach to authorization can be action-based, with a mapping of roles to which capabilities are allowed for that role. For example: .. code-block:: python credentials = {'user_id': 'foo', 'is_admin': 1, 'roles': ['nova:netadmin']} # add a policy policy_api.add_policy('action:nova:add_network', ('roles:nova:netadmin',)) policy_api.enforce(('action:nova:add_network',), credentials) In the backend this would look up the policy for 'action:nova:add_network' and then do what is effectively a 'Simple Match' style match against the credentials. keystone-9.0.0/doc/source/event_notifications.rst0000664000567000056710000004157512701407102023401 0ustar jenkinsjenkins00000000000000 .. Copyright 2013 IBM Corp. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================ Keystone Event Notifications ============================ Keystone provides notifications about usage data so that 3rd party applications can use the data for billing, monitoring, or quota purposes. This document describes the current inclusions and exclusions for Keystone notifications. Keystone currently supports two notification formats: a Basic Notification, and a Cloud Auditing Data Federation (`CADF`_) Notification. The supported operations between the two types of notification formats are documented below. Common Notification Structure ============================= Notifications generated by Keystone are generated in JSON format. An external application can format them into ATOM format and publish them as a feed. Currently, all notifications are immediate, meaning they are generated when a specific event happens. Notifications all adhere to a specific top level format: .. code-block:: javascript { "event_type": "identity..", "message_id": "", "payload": {}, "priority": "INFO", "publisher_id": "identity.", "timestamp": "" } Where ```` is a Keystone resource, such as user or project, and ```` is a Keystone operation, such as created, deleted. The key differences between the two notification formats (Basic and CADF), lie within the ``payload`` portion of the notification. The ``priority`` of the notification being sent is not configurable through the Keystone configuration file. 
This value is defaulted to INFO for all notifications sent in Keystone's case. Basic Notifications =================== All basic notifications contain a limited amount of information, specifically, just the resource type, operation, and resource id. The ``payload`` portion of a Basic Notification is a single key-value pair. .. code-block:: javascript { "resource_info": } Where ```` is the unique identifier assigned to the ``resource_type`` that is undergoing the ````. Supported Events ---------------- The following table displays the compatibility between resource types and operations. ======================== ================================= resource type supported operations ======================== ================================= group create, update, delete project create, update, delete role create, update, delete domain create, update, delete user create, update, delete trust create, delete region create, update, delete endpoint create, update, delete service create, update, delete policy create, update, delete ======================== ================================= Note, ``trusts`` are an immutable resource, they do not support ``update`` operations. Example Notification -------------------- This is an example of a notification sent for a newly created user: .. code-block:: javascript { "event_type": "identity.user.created", "message_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69", "payload": { "resource_info": "671da331c47d4e29bb6ea1d270154ec3" }, "priority": "INFO", "publisher_id": "identity.host1234", "timestamp": "2013-08-29 19:03:45.960280" } If the operation fails, the notification won't be sent, and no special error notification will be sent. Information about the error is handled through normal exception paths. Auditing with CADF ================== Keystone uses the `PyCADF`_ library to emit CADF notifications, these events adhere to the DMTF `CADF`_ specification. 
This standard provides auditing capabilities for compliance with security, operational, and business processes and supports normalized and categorized event data for federation and aggregation. .. _PyCADF: http://docs.openstack.org/developer/pycadf .. _CADF: http://www.dmtf.org/standards/cadf CADF notifications include additional context data around the ``resource``, the ``action`` and the ``initiator``. CADF notifications may be emitted by changing the ``notification_format`` to ``cadf`` in the configuration file. The ``payload`` portion of a CADF Notification is a CADF ``event``, which is represented as a JSON dictionary. For example: .. code-block:: javascript { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "typeURI": "service/security/account/user", "host": { "agent": "curl/7.22.0(x86_64-pc-linux-gnu)", "address": "127.0.0.1" }, "id": "" }, "target": { "typeURI": "", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/security", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2014-02-14T01:20:47.932842+00:00", "action": "", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f", } Where the following are defined: * ````: ID of the user that performed the operation * ````: CADF specific target URI, (i.e.: data/security/project) * ````: The action being performed, typically: ````. ```` Additionally there may be extra keys present depending on the operation being performed, these will be discussed below. Note, the ``eventType`` property of the CADF payload is different from the ``event_type`` property of a notifications. The former (``eventType``) is a CADF keyword which designates the type of event that is being measured, this can be: `activity`, `monitor` or `control`. 
Whereas the latter (``event_type``) is described in previous sections as: `identity..` Supported Events ---------------- The following table displays the compatibility between resource types and operations. ====================== ============================= ============================= resource type supported operations typeURI ====================== ============================= ============================= group create, update, delete data/security/group project create, update, delete data/security/project role create, update, delete data/security/role domain create, update, delete data/security/domain user create, update, delete data/security/account/user trust create, delete data/security/trust region create, update, delete data/security/region endpoint create, update, delete data/security/endpoint service create, update, delete data/security/service policy create, update, delete data/security/policy role assignment add, remove data/security/account/user None authenticate data/security/account/user ====================== ============================= ============================= Example Notification - Project Create ------------------------------------- The following is an example of a notification that is sent when a project is created. This example can be applied for any ``create``, ``update`` or ``delete`` event that is seen in the table above. The ```` and ``typeURI`` fields will be change. The difference to note is the inclusion of the ``resource_info`` field which contains the ```` that is undergoing the operation. Thus creating a common element between the CADF and Basic notification formats. .. 
code-block:: javascript { "event_type": "identity.project.created", "message_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "typeURI": "service/security/account/user", "host": { "agent": "curl/7.22.0(x86_64-pc-linux-gnu)", "address": "127.0.0.1" }, "id": "c9f76d3c31e142af9291de2935bde98a" }, "target": { "typeURI": "data/security/project", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/security", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2014-02-14T01:20:47.932842+00:00", "action": "created.project", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f", "resource_info": "671da331c47d4e29bb6ea1d270154ec3" } "priority": "INFO", "publisher_id": "identity.host1234", "timestamp": "2013-08-29 19:03:45.960280" } Example Notification - Authentication ------------------------------------- The following is an example of a notification that is sent when a user authenticates with Keystone. Note that this notification will be emitted if a user successfully authenticates, and when a user fails to authenticate. .. 
code-block:: javascript { "event_type": "identity.authenticate", "message_id": "1371a590-d5fd-448f-b3bb-a14dead6f4cb", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "typeURI": "service/security/account/user", "host": { "agent": "curl/7.22.0(x86_64-pc-linux-gnu)", "address": "127.0.0.1" }, "id": "c9f76d3c31e142af9291de2935bde98a" }, "target": { "typeURI": "service/security/account/user", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/security", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2014-02-14T01:20:47.932842+00:00", "action": "authenticate", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f" }, "priority": "INFO", "publisher_id": "identity.host1234", "timestamp": "2014-02-14T01:20:47.932842" } Example Notification - Federated Authentication ----------------------------------------------- The following is an example of a notification that is sent when a user authenticates with Keystone via Federation. This example is similar to the one seen above, however the ``initiator`` portion of the ``payload`` contains a new ``credential`` section. .. 
code-block:: javascript { "event_type": "identity.authenticate", "message_id": "1371a590-d5fd-448f-b3bb-a14dead6f4cb", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "credential": { "type": "http://docs.oasis-open.org/security/saml/v2.0", "token": "671da331c47d4e29bb6ea1d270154ec3", "identity_provider": "ACME", "user": "c9f76d3c31e142af9291de2935bde98a", "groups": [ "developers" ] }, "typeURI": "service/security/account/user", "host": { "agent": "curl/7.22.0(x86_64-pc-linux-gnu)", "address": "127.0.0.1" }, "id": "c9f76d3c31e142af9291de2935bde98a" }, "target": { "typeURI": "service/security/account/user", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/security", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2014-02-14T01:20:47.932842+00:00", "action": "authenticate", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f" }, "priority": "INFO", "publisher_id": "identity.host1234", "timestamp": "2014-02-14T01:20:47.932842" } Example Notification - Role Assignment -------------------------------------- The following is an example of a notification that is sent when a role is granted or revoked to a project or domain, for a user or group. It is important to note that this type of notification has many new keys that convey the necessary information. Expect the following in the ``payload``: ``role``, ``inherited_to_project``, ``project`` or ``domain``, ``user`` or ``group``. With the exception of ``inherited_to_project``, each will represent the unique identifier of the resource type. .. 
code-block:: javascript { "event_type": "identity.role_assignment.created", "message_id": "a5901371-d5fd-b3bb-448f-a14dead6f4cb", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "typeURI": "service/security/account/user", "host": { "agent": "curl/7.22.0(x86_64-pc-linux-gnu)", "address": "127.0.0.1" }, "id": "c9f76d3c31e142af9291de2935bde98a" }, "target": { "typeURI": "service/security/account/user", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/security", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2014-08-20T01:20:47.932842+00:00", "role": "0e6b990380154a2599ce6b6e91548a68", "project": "24bdcff1aab8474895dbaac509793de1", "inherited_to_projects": false, "group": "c1e22dc67cbd469ea0e33bf428fe597a", "action": "created.role_assignment", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f" }, "priority": "INFO", "publisher_id": "identity.host1234", "timestamp": "2014-08-20T01:20:47.932842" } Recommendations for consumers ============================= One of the most important notifications that Keystone emits is for project deletions (``event_type`` = ``identity.project.deleted``). This event should indicate to the rest of OpenStack that all resources (such as virtual machines) associated with the project should be deleted. Projects can also have update events (``event_type`` = ``identity.project.updated``), wherein the project has been disabled. Keystone ensures this has an immediate impact on the accessibility of the project's resources by revoking tokens with authorization on the project, but should **not** have a direct impact on the projects resources (in other words, virtual machines should **not** be deleted). Opting out of certain notifications =================================== There are many notifications that Keystone emits and some deployers may only care about certain events. 
In Keystone there is a way to opt-out of certain notifications. In ``/etc/keystone/keystone.conf`` you can set ``opt_out`` to the event you wish to opt-out of. It is possible to opt-out of multiple events. Example: .. code-block:: ini [DEFAULT] notification_opt_out = identity.user.created notification_opt_out = identity.role_assignment.created notification_opt_out = identity.authenticate.pending This will opt-out notifications for user creation, role assignment creation and successful authentications. For a list of event types that can be used, refer to: `Telemetry Measurements`_. .. _Telemetry Measurements: http://docs.openstack.org/admin-guide-cloud/telemetry-measurements.html#openstack-identity keystone-9.0.0/doc/source/policy_mapping.rst0000664000567000056710000004367412701407102022343 0ustar jenkinsjenkins00000000000000=============================== Mapping of policy target to API =============================== The following table shows the target in the policy.json file for each API. 
========================================================= === Target API ========================================================= === identity:get_region GET /v3/regions/{region_id} identity:list_regions GET /v3/regions identity:create_region POST /v3/regions identity:update_region PATCH /v3/regions/{region_id} identity:delete_region DELETE /v3/regions/{region_id} identity:get_service GET /v3/services/{service_id} identity:list_services GET /v3/services identity:create_service POST /v3/services identity:update_service PATCH /v3/services/{service_id} identity:delete_service DELETE /v3/services/{service_id} identity:get_endpoint GET /v3/endpoints/{endpoint_id} identity:list_endpoints GET /v3/endpoints identity:create_endpoint POST /v3/endpoints identity:update_endpoint PATCH /v3/endpoints/{endpoint_id} identity:delete_endpoint DELETE /v3/endpoints/{endpoint_id} identity:get_domain GET /v3/domains/{domain_id} identity:list_domains GET /v3/domains identity:create_domain POST /v3/domains identity:update_domain PATCH /v3/domains/{domain_id} identity:delete_domain DELETE /v3/domains/{domain_id} identity:get_project GET /v3/projects/{project_id} identity:list_projects GET /v3/projects identity:list_user_projects GET /v3/users/{user_id}/projects identity:create_project POST /v3/projects identity:update_project PATCH /v3/projects/{project_id} identity:delete_project DELETE /v3/projects/{project_id} identity:get_user GET /v3/users/{user_id} identity:list_users GET /v3/users identity:create_user POST /v3/users identity:update_user PATCH /v3/users/{user_id} identity:delete_user DELETE /v3/users/{user_id} identity:change_password POST /v3/users/{user_id}/password identity:get_group GET /v3/groups/{group_id} identity:list_groups GET /v3/groups identity:list_groups_for_user GET /v3/users/{user_id}/groups identity:create_group POST /v3/groups identity:update_group PATCH /v3/groups/{group_id} identity:delete_group DELETE /v3/groups/{group_id} identity:list_users_in_group GET 
/v3/groups/{group_id}/users identity:remove_user_from_group DELETE /v3/groups/{group_id}/users/{user_id} identity:check_user_in_group GET /v3/groups/{group_id}/users/{user_id} identity:add_user_to_group PUT /v3/groups/{group_id}/users/{user_id} identity:get_credential GET /v3/credentials/{credential_id} identity:list_credentials GET /v3/credentials identity:create_credential POST /v3/credentials identity:update_credential PATCH /v3/credentials/{credential_id} identity:delete_credential DELETE /v3/credentials/{credential_id} identity:ec2_get_credential GET /v3/users/{user_id}/credentials/OS-EC2/{credential_id} identity:ec2_list_credentials GET /v3/users/{user_id}/credentials/OS-EC2 identity:ec2_create_credential POST /v3/users/{user_id}/credentials/OS-EC2 identity:ec2_delete_credential DELETE /v3/users/{user_id}/credentials/OS-EC2/{credential_id} identity:get_role GET /v3/roles/{role_id} identity:list_roles GET /v3/roles identity:create_role POST /v3/roles identity:update_role PATCH /v3/roles/{role_id} identity:delete_role DELETE /v3/roles/{role_id} identity:get_domain_role GET /v3/roles/{role_id} where role.domain_id is not null identity:list_domain_roles GET /v3/roles?domain_id where role.domain_id is not null identity:create_domain_role POST /v3/roles where role.domain_id is not null identity:update_domain_role PATCH /v3/roles/{role_id} where role.domain_id is not null identity:delete_domain_role DELETE /v3/roles/{role_id} where role.domain_id is not null identity:get_implied_role GET /v3/roles/{prior_role_id}/implies/{implied_role_id} identity:list_implied_roles GET /v3/roles/{prior_role_id}/implies identity:create_implied_role PUT /v3/roles/{prior_role_id}/implies/{implied_role_id} identity:delete_implied_role DELETE /v3/roles/{prior_role_id}/implies/{implied_role_id} identity:list_role_inference_rules GET /v3/role_inferences identity:check_implied_role HEAD /v3/roles/{prior_role_id}/implies/{implied_role_id} identity:check_grant GET `grant_resources`_ 
identity:list_grants GET `grant_collections`_ identity:create_grant PUT `grant_resources`_ identity:revoke_grant DELETE `grant_resources`_ identity:list_role_assignments GET /v3/role_assignments identity:list_role_assignments_for_tree GET /v3/role_assignments?include_subtree identity:get_policy GET /v3/policy/{policy_id} identity:list_policies GET /v3/policy identity:create_policy POST /v3/policy identity:update_policy PATCH /v3/policy/{policy_id} identity:delete_policy DELETE /v3/policy/{policy_id} identity:check_token HEAD /v3/auth/tokens identity:validate_token - GET /v2.0/tokens/{token_id} - GET /v3/auth/tokens identity:validate_token_head HEAD /v2.0/tokens/{token_id} identity:revocation_list - GET /v2.0/tokens/revoked - GET /v3/auth/tokens/OS-PKI/revoked identity:revoke_token DELETE /v3/auth/tokens identity:create_trust POST /v3/OS-TRUST/trusts identity:list_trusts GET /v3/OS-TRUST/trusts identity:list_roles_for_trust GET /v3/OS-TRUST/trusts/{trust_id}/roles identity:get_role_for_trust GET /v3/OS-TRUST/trusts/{trust_id}/roles/{role_id} identity:delete_trust DELETE /v3/OS-TRUST/trusts/{trust_id} identity:create_consumer POST /v3/OS-OAUTH1/consumers identity:get_consumer GET /v3/OS-OAUTH1/consumers/{consumer_id} identity:list_consumers GET /v3/OS-OAUTH1/consumers identity:delete_consumer DELETE /v3/OS-OAUTH1/consumers/{consumer_id} identity:update_consumer PATCH /v3/OS-OAUTH1/consumers/{consumer_id} identity:authorize_request_token PUT /v3/OS-OAUTH1/authorize/{request_token_id} identity:list_access_token_roles GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles identity:get_access_token_role GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles/{role_id} identity:list_access_tokens GET /v3/users/{user_id}/OS-OAUTH1/access_tokens identity:get_access_token GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} identity:delete_access_token DELETE /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} 
identity:list_projects_for_endpoint GET /v3/OS-EP-FILTER/endpoints/{endpoint_id}/projects identity:add_endpoint_to_project PUT /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} identity:check_endpoint_in_project GET /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} identity:list_endpoints_for_project GET /v3/OS-EP-FILTER/projects/{project_id}/endpoints identity:remove_endpoint_from_project DELETE /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} identity:create_endpoint_group POST /v3/OS-EP-FILTER/endpoint_groups identity:list_endpoint_groups GET /v3/OS-EP-FILTER/endpoint_groups identity:get_endpoint_group GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} identity:update_endpoint_group PATCH /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} identity:delete_endpoint_group DELETE /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} identity:list_projects_associated_with_endpoint_group GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects identity:list_endpoints_associated_with_endpoint_group GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/endpoints identity:get_endpoint_group_in_project GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} identity:list_endpoint_groups_for_project GET /v3/OS-EP-FILTER/projects/{project_id}/endpoint_groups identity:add_endpoint_group_to_project PUT /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} identity:remove_endpoint_group_from_project DELETE /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} identity:create_identity_provider PUT /v3/OS-FEDERATION/identity_providers/{idp_id} identity:list_identity_providers GET /v3/OS-FEDERATION/identity_providers identity:get_identity_providers GET /v3/OS-FEDERATION/identity_providers/{idp_id} identity:update_identity_provider PATCH /v3/OS-FEDERATION/identity_providers/{idp_id} identity:delete_identity_provider DELETE 
/v3/OS-FEDERATION/identity_providers/{idp_id} identity:create_protocol PUT /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} identity:update_protocol PATCH /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} identity:get_protocol GET /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} identity:list_protocols GET /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols identity:delete_protocol DELETE /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} identity:create_mapping PUT /v3/OS-FEDERATION/mappings/{mapping_id} identity:get_mapping GET /v3/OS-FEDERATION/mappings/{mapping_id} identity:list_mappings GET /v3/OS-FEDERATION/mappings identity:delete_mapping DELETE /v3/OS-FEDERATION/mappings/{mapping_id} identity:update_mapping PATCH /v3/OS-FEDERATION/mappings/{mapping_id} identity:create_service_provider PUT /v3/OS-FEDERATION/service_providers/{sp_id} identity:list_service_providers GET /v3/OS-FEDERATION/service_providers identity:get_service_provider GET /v3/OS-FEDERATION/service_providers/{sp_id} identity:update_service_provider PATCH /v3/OS-FEDERATION/service_providers/{sp_id} identity:delete_service_provider DELETE /v3/OS-FEDERATION/service_providers/{sp_id} identity:get_auth_catalog GET /v3/auth/catalog identity:get_auth_projects GET /v3/auth/projects identity:get_auth_domains GET /v3/auth/domains identity:list_projects_for_groups GET /v3/OS-FEDERATION/projects identity:list_domains_for_groups GET /v3/OS-FEDERATION/domains identity:list_revoke_events GET /v3/OS-REVOKE/events identity:create_policy_association_for_endpoint PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} identity:check_policy_association_for_endpoint GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} identity:delete_policy_association_for_endpoint DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} identity:create_policy_association_for_service PUT 
/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} identity:check_policy_association_for_service GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} identity:delete_policy_association_for_service DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} identity:create_policy_association_for_region_and_service PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} identity:check_policy_association_for_region_and_service GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} identity:delete_policy_association_for_region_and_service DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} identity:get_policy_for_endpoint GET /v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy identity:list_endpoints_for_policy GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints identity:create_domain_config PUT /v3/domains/{domain_id}/config identity:get_domain_config - GET /v3/domains/{domain_id}/config - GET /v3/domains/{domain_id}/config/{group} - GET /v3/domains/{domain_id}/config/{group}/{option} identity:update_domain_config - PATCH /v3/domains/{domain_id}/config - PATCH /v3/domains/{domain_id}/config/{group} - PATCH /v3/domains/{domain_id}/config/{group}/{option} identity:delete_domain_config - DELETE /v3/domains/{domain_id}/config - DELETE /v3/domains/{domain_id}/config/{group} - DELETE /v3/domains/{domain_id}/config/{group}/{option} identity:get_domain_config_default - GET /v3/domains/config/default - GET /v3/domains/config/{group}/default - GET /v3/domains/config/{group}/{option}/default ========================================================= === .. 
_grant_resources: *grant_resources* are: - /v3/projects/{project_id}/users/{user_id}/roles/{role_id} - /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} - /v3/domains/{domain_id}/users/{user_id}/roles/{role_id} - /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id} - /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/{role_id}/inherited_to_projects - /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects - /v3/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/{role_id}/inherited_to_projects - /v3/OS-INHERIT/projects/{project_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects .. _grant_collections: *grant_collections* are: - /v3/projects/{project_id}/users/{user_id}/roles - /v3/projects/{project_id}/groups/{group_id}/roles - /v3/domains/{domain_id}/users/{user_id}/roles - /v3/domains/{domain_id}/groups/{group_id}/roles - /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/inherited_to_projects - /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/inherited_to_projects keystone-9.0.0/doc/source/auth-totp.rst0000664000567000056710000000716612701407102021252 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================================== Time-based One-time Password (TOTP) =================================== Configuring TOTP ================ TOTP is not enabled in Keystone by default. To enable it add the ``totp`` authentication method to the ``[auth]`` section in ``keystone.conf``: .. 
code-block:: ini [auth] methods = external,password,token,oauth1,totp For a user to have access to TOTP, he must have configured TOTP credentials in Keystone and a TOTP device (i.e. `Google Authenticator`_). .. _Google Authenticator: http://www.google.com/2step TOTP uses a base32 encoded string for the secret. The secret must be at least 128 bits (16 bytes). The following python code can be used to generate a TOTP secret: .. code-block:: python import base64 message = '1234567890123456' print base64.b32encode(message).rstrip('=') Example output:: GEZDGNBVGY3TQOJQGEZDGNBVGY This generated secret can then be used to add new 'totp' credentials to a specific user. Create a TOTP credential ------------------------ Create ``totp`` credentials for user: .. code-block:: bash USER_ID=b7793000f8d84c79af4e215e9da78654 SECRET=GEZDGNBVGY3TQOJQGEZDGNBVGY curl -i \ -H "Content-Type: application/json" \ -d ' { "credential": { "blob": "'$SECRET'", "type": "totp", "user_id": "'$USER_ID'" } }' \ http://localhost:5000/v3/credentials ; echo Google Authenticator -------------------- On a device install Google Authenticator and inside the app click on 'Set up account' and then click on 'Enter provided key'. In the input fields enter account name and secret. Optionally a QR code can be generated programmatically to avoid having to type the information. QR code ------- Create TOTP QR code for device: .. code-block:: python import qrcode secret='GEZDGNBVGY3TQOJQGEZDGNBVGY' uri = 'otpauth://totp/{name}?secret={secret}&issuer={issuer}'.format( name='name', secret=secret, issuer='Keystone') img = qrcode.make(uri) img.save('totp.png') In Google Authenticator app click on 'Set up account' and then click on 'Scan a barcode', and then scan the 'totp.png' image. This should create a new TOTP entry in the application. Authenticate with TOTP ====================== Google Authenticator will generate a 6 digit PIN (passcode) every few seconds. 
Use the passcode and your user ID to authenticate using the ``totp`` method. Tokens ------ Get a token with default scope (may be unscoped) using totp: .. code-block:: bash USER_ID=b7793000f8d84c79af4e215e9da78654 PASSCODE=012345 curl -i \ -H "Content-Type: application/json" \ -d ' { "auth": { "identity": { "methods": [ "totp" ], "totp": { "user": { "id": "'$USER_ID'", "passcode": "'$PASSCODE'" } } } } }' \ http://localhost:5000/v3/auth/tokens ; echo keystone-9.0.0/doc/source/apache-httpd.rst0000664000567000056710000001274712701407102021670 0ustar jenkinsjenkins00000000000000 .. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================= Running Keystone in HTTPD ========================= mod_proxy_uwsgi --------------- The recommended keystone deployment is to have a real web server such as Apache HTTPD or nginx handle the HTTP connections and proxy requests to an independent keystone server (or servers) running under a wsgi container such as uwsgi or gunicorn. The typical deployment will have several applications proxied by the web server (for example horizon on /dashboard and keystone on /identity, /identity_admin, port :5000, and :35357). Proxying allows the applications to be shut down and restarted independently, and a problem in one application isn't going to affect the web server or other applications. The servers can easily be run in their own virtualenvs. 
The httpd/ directory contains sample files for configuring HTTPD to proxy requests to keystone servers running under uwsgi. Copy the `httpd/uwsgi-keystone.conf` sample configuration file to the appropriate location for your Apache server, on Debian/Ubuntu systems it is:: /etc/apache2/sites-available/uwsgi-keystone.conf On Red Hat based systems it is:: /etc/httpd/conf.d/uwsgi-keystone.conf Update the file to match your system configuration. Enable TLS by supplying the correct certificates. Enable mod_proxy_uwsgi. * On Ubuntu the required package is libapache2-mod-proxy-uwsgi; enable using ``sudo a2enmod proxy`` * On Fedora the required package is mod_proxy_uwsgi; enable by creating a file ``/etc/httpd/conf.modules.d/11-proxy_uwsgi.conf`` containing ``LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so`` Enable the site by creating a symlink from the file in ``sites-available`` to ``sites-enabled``, for example, on Debian/Ubuntu systems (not required on Red Hat based systems):: ln -s /etc/apache2/sites-available/uwsgi-keystone.conf /etc/apache2/sites-enabled/ Start or restart HTTPD to pick up the new configuration. Now configure and start the uwsgi services. Copy the `httpd/keystone-uwsgi-admin.ini` and `httpd/keystone-uwsgi-public.ini` files to `/etc/keystone`. Update the files to match your system configuration (for example, you'll want to set the number of threads for the public and admin servers). Start up the keystone servers using uwsgi:: $ sudo pip install uwsgi $ uwsgi /etc/keystone/keystone-uwsgi-admin.ini $ uwsgi /etc/keystone/keystone-uwsgi-public.ini mod_wsgi -------- .. WARNING:: Running Keystone under HTTPD in this configuration does not support the use of ``Transfer-Encoding: chunked``. This is due to a limitation with the WSGI spec and the implementation used by ``mod_wsgi``. It is recommended that all clients assume Keystone will not support ``Transfer-Encoding: chunked``. 
Copy the ``httpd/wsgi-keystone.conf`` sample configuration file to the appropriate location for your Apache server, on Debian/Ubuntu systems it is:: /etc/apache2/sites-available/wsgi-keystone.conf On Red Hat based systems it is:: /etc/httpd/conf.d/wsgi-keystone.conf Update the file to match your system configuration. Note the following: * Make sure the correct log directory is used. Some distributions put httpd server logs in the ``apache2`` directory and some in the ``httpd`` directory. * Enable TLS by supplying the correct certificates. Keystone's primary configuration file (``etc/keystone.conf``) and the PasteDeploy configuration file (``etc/keystone-paste.ini``) must be readable to HTTPD in one of the default locations described in :doc:`configuration`. Enable the site by creating a symlink from the file in ``sites-available`` to ``sites-enabled``, for example, on Debian/Ubuntu systems (not required on Red Hat based systems):: ln -s /etc/apache2/sites-available/wsgi-keystone.conf /etc/apache2/sites-enabled/ Restart Apache to have it start serving keystone. Access Control -------------- If you are running with Linux kernel security module enabled (for example SELinux or AppArmor) make sure that the file has the appropriate context to access the linked file. Keystone Configuration ---------------------- Make sure that when using a token format that requires persistence, you use a token persistence driver that can be shared between processes. The SQL and memcached token persistence drivers provided with keystone can be shared between processes. .. WARNING:: The KVS (``kvs``) token persistence driver cannot be shared between processes so must not be used when running keystone under HTTPD (the tokens will not be shared between the processes of the server and validation will fail). 
For SQL, in ``/etc/keystone/keystone.conf`` set:: [token] driver = sql For memcached, in ``/etc/keystone/keystone.conf`` set:: [token] driver = memcache All servers that are storing tokens need a shared backend. This means that either all servers use the same database server or use a common memcached pool. keystone-9.0.0/doc/source/devref/0000775000567000056710000000000012701407246020045 5ustar jenkinsjenkins00000000000000keystone-9.0.0/doc/source/devref/development.environment.rst0000664000567000056710000001202012701407102025446 0ustar jenkinsjenkins00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================= Setting up a Keystone development environment ============================================= This document describes getting the source from keystone's `Git repository`_ for development purposes. To install Keystone from packaging, refer instead to Keystone's `User Documentation`_. .. _`Git Repository`: http://git.openstack.org/cgit/openstack/keystone .. 
_`User Documentation`: http://docs.openstack.org/ Prerequisites ============= This document assumes you are using Ubuntu, Fedora or openSUSE (SLE) And that you have the following tools available on your system: - Python_ 2.7 and 3.4 - git_ - setuptools_ - pip_ - msgfmt (part of the gettext package) - virtualenv_ - tox_ **Reminder**: If you're successfully using a different platform, or a different version of the above, please document your configuration here! .. _Python: http://www.python.org/ .. _git: http://git-scm.com/ .. _setuptools: http://pypi.python.org/pypi/setuptools .. _tox: https://pypi.python.org/pypi/tox Getting the latest code ======================= Make a clone of the code from our `Git repository`: .. code-block:: bash $ git clone https://git.openstack.org/openstack/keystone.git When that is complete, you can: .. code-block:: bash $ cd keystone Installing dependencies ======================= Keystone maintains two lists of dependencies:: requirements.txt test-requirements.txt The first is the list of dependencies needed for running keystone, the second list includes dependencies used for active development and testing of Keystone itself. These dependencies can be installed from PyPi_ using the Python tool pip_. .. _PyPi: http://pypi.python.org/ .. _pip: http://pypi.python.org/pypi/pip However, your system *may* need additional dependencies that `pip` (and by extension, PyPi) cannot satisfy. These dependencies should be installed prior to using `pip`, and the installation method may vary depending on your platform. Ubuntu 14.04, 15.10: .. code-block:: bash $ sudo apt-get install python-dev python3-dev libxml2-dev libxslt1-dev \ libsasl2-dev libsqlite3-dev libssl-dev libldap2-dev libffi-dev Fedora 19+: .. code-block:: bash $ sudo yum install python-lxml python-greenlet-devel python-ldap \ sqlite-devel openldap-devel python-devel libxslt-devel \ openssl-devel libffi-devel openSUSE 13.2 (SLE 12): .. 
code-block:: bash $ sudo zypper install libxslt-devel openldap2-devel libopenssl-devel \ python-devel python-greenlet-devel python-ldap python-lxml \ python-pysqlite sqlite3-devel PyPi Packages and VirtualEnv ---------------------------- We recommend establishing a virtualenv to run Keystone within. virtualenv limits the Python environment to just what you're installing as dependencies, useful to keep a clean environment for working on Keystone. .. code-block:: bash $ tox -e venv --notest This will create a local virtual environment in the directory ``.tox``. Once created, you can activate this virtualenv for your current shell using: .. code-block:: bash $ source .tox/venv/bin/activate The virtual environment can be disabled using the command: .. code-block:: bash $ deactivate You can also use ``tox -e venv`` to prefix commands so that they run within the virtual environment. For more information on virtual environments, see virtualenv_. .. _virtualenv: http://www.virtualenv.org/ If you want to run Keystone outside of a virtualenv, you can install the dependencies directly into your system from the requirements files: .. code-block:: bash # Install the dependencies for running keystone $ pip install -r requirements.txt # Install the dependencies for developing, testing, and running keystone $ pip install -r test-requirements.txt # Use 'python setup.py' to link Keystone into Python's site-packages $ python setup.py develop Verifying Keystone is set up ============================ Once set up, either directly or within a virtualenv, you should be able to invoke Python and import the libraries. If you're using a virtualenv, don't forget to activate it: .. code-block:: bash $ source .tox/venv/bin/activate You should then be able to `import keystone` using Python without issue: .. code-block:: bash $ python -c "import keystone" If you can import Keystone without a traceback, you should be ready to move on to :doc:`../developing`. 
keystone-9.0.0/doc/Makefile0000664000567000056710000001317112701407102016724 0ustar jenkinsjenkins00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build SOURCEDIR = source SPHINXAPIDOC = sphinx-apidoc # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " autodoc generate the autodoc templates" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* autodoc: $(SPHINXAPIDOC) -f -o $(SOURCEDIR) ../keystone html: autodoc 
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/keystone.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/keystone.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/keystone" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/keystone" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." 
latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." keystone-9.0.0/doc/README.rst0000664000567000056710000000034612701407102016753 0ustar jenkinsjenkins00000000000000Building Docs ============= Developer documentation is generated using Sphinx. To build this documentation, run the following from the root of the repository:: $ tox -e docs The documentation will be built at ``doc/build/``. 
keystone-9.0.0/LICENSE0000664000567000056710000002363712701407102015534 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
keystone-9.0.0/ChangeLog0000664000567000056710000073101012701407245016300 0ustar jenkinsjenkins00000000000000CHANGES ======= 9.0.0 ----- * Update federated user display name with shadow_users_api 9.0.0.0rc2 ---------- * Correct `role_name` constraint dropping * Imported Translations from Zanata * Imported Translations from Zanata * Fix keystone-manage config file path * Correct test to support changing N release name * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Add release note for list_limit support * Imported Translations from Zanata * Update .gitreview for stable/mitaka 9.0.0.0rc1 ---------- * Support `id` and `enabled` attributes when listing service providers * Check for already present user without inserting in Bootstrap * Mapping which yield no identities should result in ValidationError * Make backend filter testing more comprehensive * Change xrange to range for python3 compatibility * Remove reference to keystoneclient CLI * Document running in uwsgi proxied by apache * Updating sample configuration file * Imported Translations from Zanata * Correct Hints class filter documentation * Release note cleanup * Update reported version for Mitaka * Add docs for additional bootstrap endpoint parameters * Remove unused notification method and class * Consolidate @notifications.internal into Audit * Imported Translations from Zanata * Remove some translations * Imported Translations from Zanata * Fixed user in group participance * register the config generator default hook with the right name * Imported Translations from Zanata * Rename v2 token schema used for validation * Migrate_repo init version helper * Remove TestFernetTokenProvider * Refactor TestFernetTokenProvider trust-scoped tests * Refactor TestFernetTokenProvider project-scoped tests * Refactor TestFernetTokenProvider domain-scoped tests * Refactor 
TestFernetTokenProvider unscoped token tests * Fixing mapping schema to allow local user * Fix keystone-manage example command path * Add auto-increment int primary key to revoke.backends.sql * Add PKIZ coverage to trust tests * Consolidate TestTrustRedelegation and TestTrustAuth tests * Split out domain config driver and manager tests * Add notifications to user/group membership * Add ability to send notifications for actors * Updated from global requirements * Remove foreign assignments when deleting a domain * Correct create_project driver versioning * Explicitly exclude tests from bandit scan * Move role backend tests * v2 tokens validated on the v3 API are missing timezones * Move domain config backend tests * Validate v2 fernet token returns extra attributes * Clarify virtualenv setup in developer docs * Fixes a few LDAP tests to actually run * Imported Translations from Zanata * Un-wrap function * Fix warning when running tox * Race condition in keystone domain config * Adding 'domain_id' filter to list_user_projects() * Add identity endpoint creation to bootstrap * Updated from global requirements * Remove _disable_domain from the resource API * Remove _disable_project from the resource API * Remove the notification.disabled decorator * Remove unused notification decorators * Cleanup from from split of token backend tests * Split identity backend tests * Split policy backend tests * Split catalog backend tests * Split trust backend tests * Split token backend tests * Split resource backend tests * Split assignment backend tests * Updated from global requirements * Consolidate configuration default overrides * Updating sample configuration file * IPV6 test unblacklist * Fix trust chain tests 9.0.0.0b3 --------- * Minor edits to the developing doc * Add release notes for projects acting as domains * Fix keystone.common.wsgi to explicitly use bytes * fix sample config link that 404s * add hints to list_services for templated backend * Fixes hacking for Py3 
tests * Fixes to get cert tests running in Py3 * Fixes the templated backend tests for Python3 * remove pyc files before running tests * Stop using oslotest.BaseTestCase * Return 404 instead of 401 for tokens w/o roles * Remove unused domain driver method in legacy wrapper * Deprecate domain driver interface methods * Fix the migration issue for the user doesn't have a password * Add driver details in architecture doc * Shadow users - Shadow federated users * Projects acting as domains * Update developer docs for ubuntu 15.10 * Moved CORS middleware configuration into oslo-config-generator * V2 operations create default domain on demand * Make keystone tests work on leap years * Updating sample configuration file * Fix doc build warnings * Enable LDAP connection pooling by default * Delay using threading.local() to fix check job failure * Minor edits to the installation doc * Minor edits to the configuration doc * Minor community doc edits * Updated from global requirements * Followup for LDAP removal * Remove get_session and get_engine * No more legacy engine facade in tests * Use requst local in-process cache per request * Move admin_token_auth before build_auth_context in sample paste.ini * Update default domain's description * Reference config values at runtime * Use the new enginefacade from oslo.db * Updated from global requirements * Fix incorrect assumption when deleting assignments * Remove migration_helpers.get_default_domain * db_sync doesn't create default domain * Implied roles index with cascading delete * Fix project-related forbidden response messages * Fixes a bug when setting a user's password to null * Renamed TOTP passcode generation function * Updates TOTP release note * Simplify use of secure_proxy_ssl_header * Shadow users - Separate user identities * Switch to configless bandit * Parameter to return audit ids only in revocation list * Add tests for fetching the revocation list * Updating sample configuration file * Deprecate 
logger.WritableLogger * Removing H405 violations from keystone * Updated from global requirements * Updated from global requirements * Updating sample configuration file * Remove useless {} from __table_args__ * Time-based One-time Password * Fix inconsistencies between Oauth1DriverV8 interface and driver * Oauth1 manager sets consumer secret * Remove setting class variable * Allow user list without specifying domain * Adds user_description_attribute mapping support to the LDAP backend * encode user id for notifications * Add back a bandit tox job * Enable support for posixGroups in LDAP * Add is_domain filter to v3 list_projects * Add tests in preparation of projects acting as a domain * Avoid using `len(x)` to check if x is empty * Use the driver to get limits * Fallback to list_limit from default config * Add list_limit to the white list for configs in db * Updating sample configuration file * handle unicode names for federated users * Verify project unique constraints for projects acting as domains * wsgi: fix base_url finding * Disable Admin tokens set to None * Modify rules for domain specific role assignments * Modify implied roles to honor domain specific roles * Modify rules in the v3 policy sample for domain specifc roles * Re-enable and undeprecate admin_token_auth * Don't describe trusts as an extension in configuration doc * Tidy up configuration documentation for inherited assignments * Clean up configuration documentataion on v2 user CRUD * Allow project domain_id to be nullable at the manager level * Trivial: Cleanup unused conf variables * Updating sample configuration file * Updating sample configuration file * Fixes parameter in duplicate project name creation * Fix terms from patch 275706 * sensible default for secure_proxy_ssl_header * Restricting domain_id update * Allow project_id in catalog substitutions * Avoid `None` as a redundant argument to dict.get() * Avoid "non-Pythonic" method names * Manager support for project cascade update * 
Updating sample configuration file * Expand implied roles in trust tokens * add a test that uses trusts and implies roles * Updating sample configuration file * Convert assignment.root_role config option to list of strings * Avoid wrong deletion of domain assignments * Manager support for project cascade delete * AuthContextMiddleware admin token handling * Deprecate admin_token_auth * Adds better logging to the domain config finder * Extracts logic for finding domain configs * Fix nits from domain specific roles CRUD support * Change get_project permission * Updated from global requirements * Enables token_data_helper tests for Python3 * Stop using nose as a Python3 test runner * Fix release note of removal of v2.0 trusts support * Remove PostParams middleware * Updated from global requirements * Moves policy setup into a fixture * Make pep8 *the* linting interface * Added tokenless auth headers to CORS middleware * Add backend support for deleting a projects list * Make fernet work with oauth1 authentication * Consolidate the fernet provider validate_v2_token() * Remove support for trusts in v2.0 * Add CRUD support for domain specific roles * Added CORS support to Keystone * Deprecate Saml2 auth plugin * Uses open context manager for templated catalogs * Disable the ipv6 tests in py34 * Missing 'region' in service and 'name' in endpoint for EndpointFilterCatalog * Small typos on the ldap.url config option help * Replace exit() with sys.exit() * include sample config file in docs * Fixes a language issue in a release note * Imported Translations from Zanata * Updated from global requirements * Support multiple URLs for LDAP server * Set deprecated_reason on deprecated config options * Move user and admin crud to core * squash migrations - kilo * Adds validation negative unit tests * Use oslo.log specified method to set log levels * Add RENO update for simple_cert_extension deprecation * Opt-out certain Keystone Notifications * Update the home page * Release notes 
for implied roles * deprecate pki_setup from keystone-manage * test_credential.py work with python34 * Consolidate `test_contrib_ec2.py` into `test_credential.py` * Reinitialize the policy engine where it is needed * Provide an error message if downgrading schema * Updated from global requirements * Consolidate the fernet provider issue_v2_token() * Consolidate the fernet provider validate_v3_token() * Add tests for role management with v3policy file * Fix some word spellings * Make WebSSO trusted_dashboard hostname case-insensitive * Deprecate simple_cert extension * Do not assign admin to service users * Add in TRACE logging for the manager * Add schema for OAuth1 consumer API * Correct docstrings * Remove un-used test code * Raise more precise exception on keyword mapping errors * Allow '_' character in mapping_id value * Implied Roles API * Revert "Unit test for checking cross-version migrations compatibility" * replace tenant with project in cli.py * Fix schema validation to use JSONSchema for empty entity * Replace tenant for project in resource files * Reuse project scoped token check for trusts * Add checks for project scoped data creep to tests * Add checks for domain scoped data creep * Use the oslo.utils.reflection to extract the class name * Test hyphens instead of underscores in request attributes * Simplify admin_required policy * Add caching to role assignments * Enable bandit tests * Update bandit.yaml * Enhance manager list_role_assignments to support group listing * remove KVS backend for keystone.contrib.revoke * Fix trust redelegation and associated test * use self.skipTest instead of self.skip * Removed deprecated revoke KVS backend * Revert "skip test_get_token_id_error_handling to get gate passing" * Updated from global requirements * Updated from global requirements * skip test_get_token_id_error_handling to get gate passing * Ensure pycadf initiator IDs are UUID * Check for circular references when expanding implied roles * Improves domain 
name case sensitivity tests * Fixes style issues in a v2 controller tests * Prevents creating is_domain=True projects in v2 * Refactors validation tests to better see the cases * Remove keystone/common/cache/_memcache_pool.py * Update mod_wsgi + cache config docs * Address comments from Implied Role manager patch * Fix nits in include names patch * Unit test for checking cross-version migrations compatibility * Online schema migration documentation * Updated from global requirements * Remove additional references to ldap role attribs * Remove duplicate LDAP test class * Remove more ldap project references 9.0.0.0b2 --------- * Add testcases to check cache invalidation * Fix typo abstact in comments * deprecate write support for identity LDAP * Deprecate `hash_algorithm` config option * Mark memcache and memcache_pool token deprecated * List assignments with names * Remove LDAP Role Backend * Remove LDAP Resource and LDAP Assignment backends * Removes KVS catalog backend * Fix docstring * Strengthen Mapping Validation in Federation Mappings * Add checks for token data creep using jsonschema * Deprecating API v2.0 * Implied roles driver and manager * Add support for strict url safe option on new projects and domains * Remove bandit tox environment * Add linters environment, keep pep8 as alias * Make sure the assignment creation use the right arguments * Fix indentation for oauth context * Imported Translations from Zanata * document the bootstrapping process * Add release note for revert of c4723550aa95be403ff591dd132c9024549eff10 * Updated from global requirements * Enable `id`, `enabled` attributes filtering for list IdP API * Improve Conflict error message in IdP creation * Fedora link is too old and so updated with newer version * Support the reading of default values of domain configuration options * Correct docstrings for federation driver interface * Update v3policysample tests to use admin_project not special domain_id * Enable limiting in ldap for groups * 
Enable limiting in ldap for users * Doc FIX * Store config in drivers and use it to get list_limit * Add asserts for service providers * Fix incorrect signature in federation legacy V8 wrapper * Tidy up release notes for V9 drivers * Adds an explicit utils import in test_v3_protection.py * Refactor test auth_plugin config into fixture * Create V9 version of resource driver interface * Updated from global requirements * Separate trust crud tests from trust auth tests * Delete checks for default domain delete * correct help text for bootstrap command * Replace unicode with six.text_type * Escape DN in enabled query * Test enabled emulation with special user_tree_dn * SQL migrations for implied roles * Revert "Validate domain ownership for v2 tokens" * Use assertIn to check if collection contains value * Updated from global requirements * Perform middleware tests with webtest * De-duplicate fernet payload tests * Reference driver methods through the Manager * Fix users in group and groups for user exact filters * Expose defect in users_in_group, groups_for_user exact filters * Replace deprecated library function os.popen() with subprocess * OAuth1 driver doesnt inherit its interface * Update man pages with Mitaka version and dates * Fixes hacking logger test cases to use same base * Adds a hacking check looking for Logger.warn usage * Change LOG.warn to LOG.warning * Remove redundant check after enforcing schema validation * Updating sample configuration file * Create V9 version of federation driver interface * Do not use __builtin__ in python3 * Define paste entrypoints * Add schema for federation protocol * Expose method list inconsistency in federation api * remove irrelevant parenthesis * Add return value * Test: make enforce_type=True in CONF.set_override * Updated from global requirements * Add schema for identity provider * Updating sample configuration file * Use six.moves.reload_module instead of builtin reload * Fix the incompatible issue in response header 
* Wrong usage of "an" * Correct fernet provider reference * Correct DN/encoding in test * Support url safe restriction on new projects and domains * Correct the class name of the V9 LDAP role driver * Wrong usage of "a/an" * Trival: Remove unused logging import * Updating sample configuration file * Fix pep8 job * Fix some inconsistency in docstrings * Fix 500 error when no fernet token is passed * Cleanup tox.ini py34 test list * Fixes kvs cache key mangling issue for Py3 * Some small improvements on fernet uuid handling * Updated from global requirements * Updating sample configuration file * Fix key_repository_signature method for python3 * Add audit IDs to revocation events * Enable os_inherit of Keystone v3 API * Use pip (and DevStack) instead of setuptools in docs * Correct developer documentation on venv creation * Updating sample configuration file * Updated from global requirements * Validate domain for DB-based domain config. CRUD * fix up release notes, file deprecations under right title * Updated Cloudsample * Update `developing.rst` to remove extensions stuff * Verify that user is trustee only on issuing token * Adds a base class for functional tests * Make `bootstrap` idempotent * Add `keystone-manage bootstrap` command * Changed the key repo validation to allow read only * Deprecated tox -downloadcache option removed * Fix defect in list_user_ids that only lists direct user assignments * Show defect in list_user_ids that only lists direct user assignments * Add API route for list role assignments for tree * Use list_role_assignments to get projects/domains for user * Add `type' filter for list_credentials_for_user * Clean up new_credential_ref usage and surrounding code * Create neutron service in sample_data.sh * Updating sample configuration file * Updated from global requirements * Limiting for fake LDAP * Make @truncated common for all backends * Fix exposition of bug about limiting with ldap * Use assertDictEqual instead of assertEqualPolicies 
* refactor: Remove unused test method * Remove unfixable FIXME * Use new_policy_ref consistently * fix reuse of variables * Remove comments on enforcing endpoints for trust * refactor: move the common code to manager layer * Create V9 Role Driver * Create new version of assignment driver interface * Remove keystoneclient tests * Verify that attribute `enabled` equals True * Remove invalid comment about LDAP domain support * Pass dict into update() rather than **kwargs * Refactor test use of new_*_ref * Cleans up code for `is_admin` in tokens * Deprecate ldap Role * Update extensions links * Improve comments in test_catalog * Fix for GET project by project admin * Fix multiline strings with missing spaces * Updating sample configuration file * Remove invalid TODO in extensions * Updated from global requirements * Refactor: Remove use of self where not needed * Refactor: Move uncommon entities from setUp * Split resource tests from assignment tests * Remove invalid TODO related to bug 1265071 * Fix test_crud_user_project_role_grants * Deprecate the pki and pkiz token providers * Remove invalid FIXME note * Refactor: Use Federation constants where possible * Remove exposure of routers at package level * Update API version info for Liberty * remove version from setup.cfg * Ensure endpoints returned is filtered correctly * Put py34 first in the env order of tox 9.0.0.0b1 --------- * Add release notes for mitaka-1 * set `is_admin` on tokens for admin project * Use unit.new_project_ref consistently * Reference environment close to use * refactor: move variable to where it's needed * Needn't care about the sequence for cache validation * Updated from global requirements * Fix a typo in notifications function doc * Remove RequestBodySizeLimiter from middleware * Optimize "open" method with context manager * eventlet: handle system that misses TCP_KEEPIDLE * force releasenotes warnings to be treated as errors * Cleanup region refs * Remove `extras` from token data * Use 
subprocess.check_output instead of Popen * Remove deprecated notification event_type * Remove check_role_for_trust * Correct RoleNotFound usage * Remove example extension * Updating sample configuration file * Correct docstring warnings * Using the right format to render the docstring correctly * Add release notes for mitaka thus far * Accepts Group IDs from the IdP without domain * Cleanup use of service refs * Update docs for legacy keystone extensions * Correct SecurityError with unicode args * Updated from global requirements * Use idp_id and protocol_id in jsonhome * Use standard credential_id parameter in jsonhome * Remove core module from the legacy endpoint_filter extension * Minor cleanups for usage of group refs * Reject user creation using admin token without domain * Add Trusts unique constraint to remove duplicates * deprecate `enabled` option for endpoint-policy extension * remove useless config option in endpoint filter * Use [] where a field is required * Manager support for projects acting as domains * Config option for insecure responses * Add missing colon separators to inline comments * Simplify LimitTests * Rationalize list role assignment routing * Enable listing of role assignments in a project hierarchy * Capital letters * remove use of magic numbers in sql migrate extension tests * Use new_trust_ref consistently * Updating sample configuration file * Move endpoint_filter migrations into keystone core * Move endpoint filter into keystone core * Move revoke sql migrations to common * Move revoke extension into core * Move oauth1 sql migrations to common * Move oauth1 extension into core * Move federation sql migrations to common * Move federation extension into keystone core * Fix string conversion in s3 handler for python 2 * Fix inaccurate debug mode response * Use unit.new_user_ref consistently * Imported Translations from Zanata * Updated from global requirements * Add testcases to check cache invalidation in endpoint filter extension * 
Fix the wrong method name * Updating sample configuration file * change some punctuation marks * Updated from global requirements * Remove hardcoded LDAP group schema from emulated enabled mix-in * Exclude old Shibboleth options from docs * Updated from global requirements * Use new_domain_ref instead of manually created ref * Use new_region_ref instead of manually created dict * Document release notes process * Use new_service_ref instead of manually created dict * Use unit.new_group_ref consistently * Use unit.new_role_ref consistently * Use unit.new_domain_ref consistently * Use unit.new_region_ref() consistently * Use unit.new_service_ref() consistently * Move AuthContext middleware into its own file * Use unit.new_endpoint_ref consistently * Use list_role_assignments to get assignments by role_id * Pass kwargs when using revoke_api.list_events() * Add reno for release notes management * Make K2K Mapping Attribute Examples more visible * Add S3 signature v4 checking * Fix some nits inside validation/config.py * Add Mapping Combinations for Keystone to Keystone Federation * Remove manager-driver assignment metadata construct * Correct description in Keystone key_terms * Imported Translations from Zanata * Handle fernet payload timestamp differences * Fix fernet padding for python 3 * More useful message when using direct driver import * Get user role without project id is not implemented * Update sample catalog templates * update mailmap with gyee's new email * Revert "Added CORS support to Keystone" * Updated from global requirements * test_backend_sql work with python34 * Use assertTrue/False instead of assertEqual(T/F) * Fix the issues found with local conf * Add test for security error with no message * Add exception unit tests with different message types * Cleanup message handling in test_exception * Normalize fernet payload disassembly * Common arguments for fernet payloads assembly * Capitalize a Few Words * I18n safe exceptions * Keystone Spelling 
Errors in docstrings and comments * [rally] remove deprecated arg * Move endpoint_policy migrations into keystone core * Promote an arbitrary string to be a docstring * Fix D204: blank line required after class docstring (PEP257) * Fix D202: No blank lines after function docstring (PEP257) * Update Configuring Keystone doc for consistency * Comment spelling error in assignment.core file * Fix exceptions to use correct titles * Fix UnexpectedError exceptions to use debug_message_format * Fix punctuation in doc strings * Fix docstring * Updating sample configuration file * Explain default domain in docs for other services * Correct bashate issues in gen_pki.sh * Fix incorrect federated mapping example * change stackforge url to openstack url * Updated from global requirements * Adds already passing tests to py34 run * Wrong usage of "an" * Allow the PBR_VERSION env to pass through tox * Fix D200: 1 line docstrings should fit with quotes (PEP257) * Fix D210: No whitespaces allowed surrounding docstring text (PEP257) * Fix D300: Use """triple double quotes""" (PEP257) * Fix D402: First line should not be the function's "signature" (PEP257) * Fix D208: Docstring over indented. 
(PEP257) * Add docstring validation * Add caching to get_catalog * Fix fernet key writing for python 3 * Update test modules passing on py34 * Updated from global requirements * Forbid non-stripped endpoint urls * fix deprecation warnings in cache backends * Create tests for set_default_is_domain in LDAP * Enable try_except_pass Bandit test * Enable subprocess_without_shell_equals_true Bandit test * Correct typo in copyright * Updated from global requirements * switch to oslo.cache * Updating sample configuration file * Updated from global requirements * keystone-paste.ini docs for deployers are out of date * Correct the filename * More info in RequestContext * Fix some nits in `configure_federation.rst` * add placeholder migrations for liberty * Remove bas64utils and tests * Create a version package * Remove oslo.policy implementation tests from keystone * Refactor: Don't hard code 409 Conflict error codes * Fix use of TokenNotFound * Refactor: change 403 status codes in test names * Refactor: change 410 status codes in test names * Refactor: change 400 status codes in test names * Refactor: change 404 status codes in test names * Updated from global requirements * Imported Translations from Zanata * add initiator to v2 calls for additional auditing * Fixed missed translatable string inside exception * Handle 16-char non-uuid user IDs in payload * Additional documentation for services * Rename fernet methods to match expiration timestamp * Updated from global requirements * Enable password_config_option_not_marked_secret Bandit test * Enable hardcoded_bind_all_interfaces Bandit test * Documentation for other services * Reclassify get_project_by_name() controller method * Trivial fix of some typos found * Filters is_domain=True in v2 get_project_by_name * Add test case passing is_domain flag as False 8.0.0 ----- * Ensure token validation works irrespective of padding * Ensure token validation works irrespective of padding * Imported Translations from Zanata * 
Rename RestfulTestCase.v3_authenticate_token() to v3_create_token() * Improving domain_id update tests * Show v3 endpoints in v2 endpoint list * Expose 1501698 bug * Replace sqlalchemy-migrate occurences from code.google to github * Fix unreachable code in test_v3 module * Imported Translations from Zanata * Use deepcopy of mapping fixtures in tests * Show v3 endpoints in v2 endpoint list * Enable Bandit 0.13.2 tests * Update bandit blacklist_imports config * Cleanup _build_federated_info * Add LimitRequestBody to sample httpd config * Make __all__ immutable * Skip rows with empty remote_ids * Includes server_default option in is_domain column * Remove unused get_user_projects() * Deprecate httpd/keystone.py * Skip rows with empty remote_ids * Fix order of arguments in assertDictEqual * Cleanup fernet validate_v3_token * Update bandit blacklist_calls config * Add unit test for creating RequestContext * Add user_domain_id, project_domain_id to auth context * Add user domain info to federated fernet tokens * Unit tests for fernet validate_v3_token * Fix order of arguments in assertEqual * Updating sample configuration file * Cleanup of Translations * Imported Translations from Zanata * Uses constants for 5XX http status codes in tests * Fixes v3_authenticate_token calls - no default * Fixes the way v3_admin is called to match its def * Declares expected_status in method signatures * Refactor: Don't hard code the error code * Correct docstrings * Correct comment to not be driver-specific * Move development environment setup instructions to standard location * Fix typo in config help * Use the correct import for range * Adds interface tests for timeutils * Add unit tests for token_to_auth_context * Updating sample configuration file 8.0.0.0rc1 ---------- * Open Mitaka development * Bring bandit config up-to-date * Update the examples used for the trusted_dashboard option * Log message when debug is enabled * Clean up bandit profiles * federation.idp use correct 
subprocess * Change ignore-errors to ignore_errors * Imported Translations from Zanata * Remove unused code in domain config checking * Relax newly imposed sql driver restriction for domain config * Add documentation for configuring IdP WebSSO * Updated from global requirements * check if tokenless auth is configured before validating * Fix the referred [app:app_v3] into [pipeline:api_v3] * Updated from global requirements * Issue deprecation warning if domain_id not specified in create call * functional tests for keystone on subpaths * Removed the extra http:// from JSON schema link * Document httpd for accept on /identity, /identity_admin * Updated from global requirements * Update federation router with missing call * Reject rule if assertion type unset * Update man pages with liberty version and dates * Refactor: Don't hard code the error code * Move TestClient to test_versions * Use oslo.log fixture * Update apache-httpd.rst * Updated from global requirements * Remove padding from Fernet tokens * Imported Translations from Transifex * Updated from global requirements * Fixed typos in 'developing_drivers' doc * Stop using deprecated keystoneclient function * Change tests to use common name for keystone.tests.unit * Removes py3 test import hacks * Updating sample configuration file * Fixes confusing deprecation message 8.0.0.0b3 --------- * Add methods for checking scoped tokens * Build oslo.context RequestContext * Correct docstring for common.authorization * Deprecate LDAP Resource Backend * Added CORS support to Keystone * List credentials by type * Fixes a typo in a comment * Tokenless authz with X.509 SSL client certificate * Support project hierarchies in data driver tests * Stable Keystone Driver Interfaces * Initial support for versioned driver classes * Add federated auth for idp specific websso * Adds caching to paste deploy's egg lookup * Fix grammar in doc string * Test list_role_assignment in standard inheritance tests * Broaden domain-group testing 
of list_role_assignments * Add support for group membership to data driven assignment tests * Add support for effective & inherited mode in data driven tests * Add support for data-driven backend assignment testing * Updated from global requirements * Change JSON Home for OS-FEDERATION to use /auth/projects|domains * Unit tests for is_domain field in project's table * Group tox optional dependencies * Provide new_xyz_ref functions in tests.core * Refactor mapping rule engine tests to not create servers * Updating sample configuration file * Correct docstrings in resource/core.py * Validate Mapped User object * Set max on max_password_length to passlib max * Simplify federated_domain_name processing * Get method's class name in a python3-compatible way * Stop reading local config for domain-specific SQL config driver * Enforce .config_overrides is called exactly once * Use /auth/projects in tests * Remove keystone/openstack/* from coveragerc * Rationalize unfiltered list role assignment test * Change mongodb extras to lowercase * Refactor: Provider._rebuild_federated_info() * Refactor: rename Fernet's unscoped federated payload * Fernet payloads for federated scoped tokens * No More .reload_backends() or .reload_backend() * Ensure ephemeral user's user_id is url-safe * Use min and max on IntOpt option types * Adds a notification testcase for unbound methods * Do not revoke all of a user's tokens when a role assignment is deleted * Handle tokens created and quickly revoked with insufficient timestamp precision * Show that unscoped tokens are revoked when deleting role assignments * Prevent exception due to missing id of LDAP entity * Expose exception due to missing id of LDAP entity * Add testcase to test invalid region id in request * Add region_id filter for List Endpoints API * Remove references to keystone.openstack.common * Remove all traces of oslo incubator * Updating sample configuration file * Test v2 tokens being deleted by v3 * Use entrypoints for paste 
middleware and apps * update links in http-api to point to specs repo * Add necessary executable permission * Refactor: use fixtures.TempDir more * Add is_domain field in Project Table * Prevent exception for invalidly encoded parameters * Extras for bandit * Use extras for memcache and MongoDB packages * Use wsgi_scripts to create admin and public httpd files * Update Httpd configuration docs for sites-available/enabled * Remove unnecessary check * Update 'doc/source/setup.rst' * Remove unnecessary load_backends from TestKeystoneTokenModel * Updated from global requirements * Imported Translations from Transifex * Updated from global requirements * Show helpful message when request body is not provided * Fix logging in federation/idp.py * Enhance tests for saml2 signing exception logging * Remove deprecated methods from assignment.Manager * Stop using deprecated assignment manager methods * EndpointFilter driver doesnt inherit its interface * Hardens the validated decorator's implementation * Updating sample configuration file * Simplify rule in sample v3 policy file * Improve a few random docstrings * Maintain datatypes when loading configs from DB * Remove "tenants" from user_attribute_ignore default * Use oslo_config PortOpt support * Updated from global requirements * Updated from global requirements * Fix the misspelling * When validating a V3 token as V2, use the v3_to_v2 conversion * Do not require the token_id for converting v3 to v2 tokens * Maintain the expiry of v2 fernet tokens * Fix typo in doc-string * Validate domain ownership for v2 tokens * Fix docstring in mapped plugin * Updated from global requirements * Minor grammar fixes to connection pooling section * Creates a fixture representing as LDAP database * Sample config help for supplied drivers * Improve List Role Assignments Filters Performance * Update docs for stevedore drivers * Fixes an incorrect docstring in notifications * Stop calling deprecated assignment manager methods * Updated from 
global requirements * Updating sample configuration file * Adds backend check to setup of LDAP tests * Improve a few random docstrings (H405) * Remove excessive transformation to list * Stop calling deprecated assignment manager methods * Remove reference of old endpoint_policy in paste file * Fernet 'expires' value loses 'ms' after validation * Correct enabled emulation query to request no attributes * NotificationsTestCase running in isolation * Adds/updates notifications test cases * Fix duplicate-key pylint issue * Fix explicit line joining with backslash * Fixes an issue with data ordering in the tests * Imported Translations from Transifex * Allow Domain Admin to get domain details * Assignment driver cleaning * Cleanup tearDown in unit tests * Fix unbound error in federation _sign_assertion * Fix typos of RoleAssignmentV3._format_entity doc * Updating sample configuration file * Updated from global requirements * Remove unnecessary check from notifications.py * Remove oslo import hacking check * Use dict.items() rather than six.iteritems() * Cleanup use of iteritems * Imported Translations from Transifex * Missing ADMIN_USER in sample_data.sh * Update exported variables for openstack client * Use extras for ldap dependencies * Add better user feedback when bind is not implemented * Test to ensure fernet key rotation results in new key sets * Better error message when unable to map user * Refactor _populate_roles_for_groups() * Add groups in scoped federated tokens * Adds missing list_endpoints tests * Reject create endpoint with invalid urls * Explain the "or None" on eventlet's client_socket_timeout * Reduce number of Fernet log messages * Fix test_admin to expect admin endpoint * Fixes a docstring to reflect actual return values * Give some message when an invalid token is in use 8.0.0.0b2 --------- * Updated from global requirements * Ensure database options registered for tests * Document sample config updated automatically * Test function call result, 
not function object * Test admin app in test_admin_version_v3 * Updating sample configuration file * Handle non-numeric files in key_repository * Fix remaining mention of KLWT * Updated from global requirements * Replace 401 to 404 when token is invalid * Assign different values to public and admin ports * Fix four typos and Add one space on keystone document * Reuse token_ref fetched in AuthContextMiddleware * Refactor: clean up TokenAPITests * pemutils isn't used anymore * Imported Translations from Transifex * Fix test_exception.py for py34 * Fix s3.core for py34 * Updating sample configuration file * Fix test_utils for py34 * test_base64utils works with py34 * Minor fix in the `configuration.rst` * Correct spacing in ``mapping_combinations.rst`` * add federation docs for mod_auth_mellon * Avoid the hard coding of admin token * Adding Documentation for Mapping Combinations * Clean up docs before creating new ones * Document policy target for operation * Fix docs in federation.routers * Fix docstrings in contrib * Additional Fernet test coverage * Refactor websso ``origin`` validation * Docs link to ACTIONS * Clean up code to use .items() * Document default value for tree_dn options * Remove unnecessary ldap imports * Move backends.py to keystone.server * move clean.py into keystone/common * Updated from global requirements * Remove unnecessary executable permission * Move cli.py into keystone.cmd * Do not remove expired revocation events on "get" * Clean up notifications type checking * Federation API provides method to evaluate rules * Move constants out of federation.core * Implement backend filtering on membership queries * Moves keystone.hacking into keystone.tests * Add missing "raise" when throwing exception * Log xmlsec1 output if it fails * Fix test method examining scoped federation tokens * Spelling correction * Fixes grammar in setup.rst in doc source * Updated from global requirements * Deprecate LDAP assignment driver options * Register 
fatal_deprecations before use * Use oslo.utils instead of home brewed tempfile * Updating sample configuration file * Add testcases for list_role_assignments of v3 domains * Centralizing build_role_assignment_* functions * Replace reference of ksc with osc * Updated from global requirements * Changing exception type to ValidationError instead of Forbidden * Standardize documentation at Service Managers * Fixes grammar in the httpd README * Fix the incorrect format for docstring * Imported Translations from Transifex * Fixes docstring to make it more precise * Removed optional dependency support * Decouple notifications from DI * Adds proper isolation to templated catalog tests * Fix log message in one of the v3 create call methods * Catch exception.Unauthorized when checking for admin * Remove convert_to_sqlite.sh * Fix for LDAP filter on group search by name * Remove fileutils from oslo-incubator * Remove comment for doc building bug 1260495 * Fix code-block in federation documentation * Modified command used to run keystone-all * Delete extra parentheses in assertEqual message * Fix the invalid testcase * Updating sample configuration file * Add unit test for fernet provider * Update federation docstring * Do not specify 'objectClass' twice in LDAP filter string * Fix tox -e py34 * Change mapping model so rules is dict * Add test case for deleting endpoint with space in url * Update requirements by hand * Consolidate the fernet provider issue_v3_token() * Group role revocation invalidates all user tokens * OS-FEDERATION no longer extension in docs * Switch from deprecated oslo_utils.timeutils.strtime * Remove unused setUp for RevokeTests * Update MANIFEST.in * Update sample config file * Disable migration sanity check * Updated from global requirements * Use oslo.service ServiceBase when loading from eventlet * Document use of wip up to developer * Simplify fernet rotation code * Tests for correct key removed * Relax the formats of accepted mapping rules for 
keystone-manage * Python 3: Use range instead of xrange for py3 compatibility 8.0.0.0b1 --------- * Document entrypoint namespaces * Short names for auth plugins * Update sample configuration file * Switch to oslo.service * Update sample configuration file * Remove redundant config * Don't try to drop FK constraints for sqlite * Remove unused requirements * Add missing keystone-manage commands to doc * Mask passwords in debug log on user password operations * Add test showing password logged * Adds some debugging statements * Imported Translations from Transifex * Use stevedore for auth drivers * Refactor extract function load_auth_method * Add unit test to exercise key rotation * Fix Fernet key rotation * Update version for Liberty 8.0.0a0 ------- * Refactor: move PKI-specific tests into the appropriate class * Needn't load fernet keys twice * Pass environment variables of proxy to tox * Fix tests failing on slower system * Mapping Engine CLI * Imported Translations from Transifex * Fix spelling in configuration comment * Switch keystone over to oslo_log versionutils * Updated from global requirements * Use lower default value for sha512_crypt rounds * Updated from global requirements * Add more Rally scenarios * Remove unnecessary dependencies from KerberosDomain * Remove deprecated external authentication plugins * Remove unnecessary code for default suffix * Remove custom assertions for python2.6 * Avoid using the interactive interpreter for a one-liner * Add validity check of 'expires_at' in trust creation * Revocation engine refactoring * Updated from global requirements * Rename directory with rally jobs files * Fix req.environ[SCRIPT_NAME] value * Don't query db if criteria longer than col length * Updated from global requirements * Run WSGI with group=keystone * Consolidate test-requirements files * Switch from deprecated isotime * Fix the wrong order of parameters when using assertEqual * Add testcases to test DefaultDomain * Remove the deprecated ec2 
token middleware * Replace blacklist_functions with blacklist_calls * updates sample_data script to use the new openstack commands * Log info for Fernet tokens over 255 chars * Update functional tox env requirements * Update sample config file * Correct oauth1 driver help text * Rename driver to backend and fix the inaccurate docstring * Add "enabled" to create service provider example * Update testing keystone2keystone doc * Removes unused database setup code * Refactor: use __getitem__ when the key will exists * Refactor: create the lookup object once * Order routes so most frequent requests are first * `api_curl_examples.rst` is out of date * Don't assume project IDs are UUID format * Don't assume group IDs are UUID format * Don't fail on converting user ids to bytes * Move endpoint policy into keystone core * Update sample config file * Tests don't override default auth methods/plugins * Tests consistently use auth_plugin_config_override * Test use config_overrides for configs * Correct tests setting auth methods to a non-list * Make sure LDAP filter is constructed correctly * basestring no longer exists in Python3 * Add mocking for memcache for Python3 tests * Fix xmldsig import * Refactor deprecations tests * Switch from MySQL-python to PyMySQL * Improve websso documentation * Remove the deprecated compute_port option * Workflow documentation is now in infra-manual * Remove XML middleware stub * Rename sample_config to genconfig * Imported Translations from Transifex * Replace ci.o.o links with docs.o.o/infra * Sync oslo-incubator cc19617 * Use single connection in get_all function * Removes temporary fix for doc generation * Improve error message when tenant ID does not exist * Updated from global requirements * Add missing part for `token` object * Remove identity_api from AuthInfo dependencies * Move bandit requirement to test-requirements-bandit.txt * Adds inherited column to RoleAssignment PK * Update dev setup requirements for Python 3.4 * Update sample 
config file * Remove support for loading auth plugin by class * Use [] where a value is required * De-duplicate auth methods * Remove unnecessary oauth_api check * Use short names for drivers * Fixes deprecations test for Python3 * Add mocking for ldappool for Python3 tests * Fixes a whitespace issue * Handles modules that moved in Python3 * Handles Python3 builtin changes * Fixes use of dict methods for Python3 * Updated from global requirements * Replace github reference by git.openstack.org and change a doc link * Refactor _create_attribute_statement IdP method * Revert "Loosen validation on matching trusted dashboard" * Updated from global requirements * Use correct LOG translation indicator for errors * Add openstack_user_domain to assertion * Pass-in domain when testing saml signing * Fixes test nits from a previous review * Implement validation on the Identity V3 API * Fix tiny typo in comment message * Updates the *py3 requirements files * Fixes mocking of oslo messaging for Python3 * pycadf now supports Python3 * eventlet now supports Python3 * Updated from global requirements * Add openstack_project_domain to assertion * Use stevedore for backend drivers * Prohibit invalid ids in subtree and parents list * Update sample config * Fix sample policy to allow user to check own token * Replaced filter with a list comprehension * Ignore multiple imports per line for six.moves * Fixes order of imports for pep8 * pep8 whitespace changes * Remove randomness from test_client_socket_timeout * Allow wsgiref to reconstruct URIs per the WSGI spec * Fix the misuse of `versionutils.deprecated` * Updated from global requirements * Update openid connect docs to include other distros 2015.1.0 -------- * Updated from global requirements * Remove pysqlite test-requirement dependency * Fixes tests to use the config fixture * Isolate injection tests * Sync oslo-incubator Ie51669bd278288b768311ddf56ad31a2f28cc7ab * Sync oslo-incubator Ie51669bd278288b768311ddf56ad31a2f28cc7ab * 
Fixes cyclic ref detection in project subtree * Updated from global requirements * Updated from global requirements * Release Import of Translations from Transifex * Make memcache client reusable across threads * Imported Translations from Transifex * Remove project association before removing endpoint group * Loosen validation on matching trusted dashboard * adds a tox target for functional tests * Adds an initial functional test * Fix the incorrect comment * Set default branch to stable/kilo * Remove assigned protocol before removing IdP * Expose domain_name in the context for policy.json * Update developer doc to reference Ubuntu 14 * Make memcache client reusable across threads * Update Get API version Curl example * Remove unused policy rule for get_trust * backend_argument should be marked secret * Update man pages for the Kilo release * make sure we properly initialize the backends before using the drivers * WebSSO should use remote_id_attribute by protocol * Work with pymongo 3.0 * Fix incorrect setting in WebSSO documentation * Stops injecting revoke_api into TestCase * Checking if Trust exists should be DRY * Use correct LOG translation indicator for warnings * backend_argument should be marked secret * Fix signed_saml2_assertion.xml tests fixture * Don't provide backends from __all__ in persistence * Add domain_id checking in create_project * Update keystone.sample.conf * Use choices in config.py * make sure we properly initialize the backends before using the drivers * WebSSO should use remote_id_attribute by protocol * Refactor common function for loading drivers * Tests don't override default config with default * Refactor MemcachedBackend to not be a Manager * Update openstack-common reference in openstack/common/README * Exposes bug on role assignments creation * Removes discover from test-reqs * Work with pymongo 3.0 2015.1.0rc1 ----------- * Update man pages for the Kilo release * Add placeholders for reserved migrations * Redundant events on 
group grant revocation * Open Liberty development * Improved policy setting in the 'v3 filter' tests * Handle NULL value for service.extra in migration 066 * Skip SSL tests because some platforms do not enable SSLv3 * Fix the typo in `token/providers/fernet/core.py` * Fix index name the assignment.actor_id table * Add index to the revocation_event.revoked_at * Document websso setup * Allow identity provider to be created with remote_ids set to None * Update testing docs * Import fernet providers only if used in keystone-manage * Imported Translations from Transifex * Fix multiple SQL backend usage validation error * Expose multiple SQL backend usage validation error * Fix for notifications for v2 role grant/delete * Update sample config file * Fix errors in ec2 signature logic checking * Don't add unformatted project-specific endpoints to catalog * Reload drivers when their domain config is updated * Correcting the name of directory holding dev docs * Fixes bug in Federation list projects endpoint * Exposes bug in Federation list projects endpoint * Updated from global requirements * Refactor assignment driver internal clean-up method names * Remove unnecessary .driver. 
references in assignment manager * Rename notification for create/delete grants * Drop sql.transaction() usage in migration * Update configuration documentation for domain config * Fix for migration 062 on MySQL * Bump advertised API version to 3.4 * Extract response headers to private method * Deprecate eventlet config options * Imported Translations from Transifex * remove useless nocatalog tests of endpoint_filter * Add API to create ecp wrapped saml assertion * Add relay_state_prefix to Service Provider * Change the way values are migrated for 007_add_remote_id_table * Add routing for list_endpoint_groups_for_project * Use ORM in upgrade test instead of manual query construction * Remove empty request bodies * Remove unnecessary import that was not checked * IdP ID registration and validation * Imported Translations from Transifex * add test of /v3/auth/catalog for endpoint_filter * Entrypoints for commands * More content in the guide for core components' migration * Make trust manager raise formatted message exception * Revert "Document mapping of policy action to operation" * Remove SQL Downgrades * Add caching to getting of the fully substituted domain config * Refactor _create_projects_hierarchy in tests * Fixes bug when getting hierarchy on Project API * Exposes bug when getting hierarchy on Project API * Move common checks into base testcase * Tests use common base class * use tokens returned by delete_tokens to invalidate cache * Loosen the validation schema used for trustee/trustor ids * region.description is optional and can be null * Update access control configuration in httpd config * Document mapping of policy action to operation * Update install.rst for Fedora * Update sample config file * Remove parent_id in v2 tenant response * Tox env for Bandit * Refactor: extract and rename unique_id method * create _member_ role as specified in CONF * Fix sample policy to allow user to revoke own token * Add unit tests for sample policy token operations * 
Mark some strings for translation * Add fernet to test_supported_token_providers * Fix up token provider help text * Tests use Database fixture * Remove parent_id in v2 token response * Update ServiceProviderModel attributes * Add docstrings to keystone.notifications functions * Remove unused metadata parameter from get_catalog methods * Imported Translations from Transifex * Cleanup use of .driver * Specify time units for default_lock_timeout * Remove stevedore from test-requirements * Lookup identity provider by remote_id for websso * Deal with PEP-0476 certificate chaining checking * Distinguish between unset and empty black and white lists * Remove unused domain config method paramters * Correct path in request logging * Correct request logging query parameters separator * Fix setting default log levels * On creation default service name to empty string * Needn't workaround when invoking `app.request()` 2015.1.0b3 ---------- * Imported Translations from Transifex * Support upload domain config files to database * Update sample httpd config file * Update Apache httpd config docs for token persistence * Cleanup Fernet testcases and add comments * Add inline comment and docstrings fixes for Fernet * Fix nullable constraints in service provider table * Move backend LDAP role testing to the new backend testing module * URL quote Fernet tokens * Use existing token test for Fernet tokens * Implement Fernet tokens for v2.0 tokens * Refactor code supporting status in JSON Home * remove expected backtrace from logs * Log when no external auth plugin registered * Adds test for federation mapping list order issues * Updated from global requirements * Enable sensitive substitutions into whitelisted domain configs * Imported Translations from Transifex * Create a fixture for key repository * Ignore unknown groups in lists for Federation * Remove RestfulTestCase.admin_request * Remove SSL configuration instructions from HTTPd docs * Wrap apache-httpd.rst * Remove fix for 
migration 37 * Cleanup for credentials schema test * Refactor sql filter code for clarity * Prefer . to setattr()/getattr() * Build domain scope for Fernet tokens * Mark the domain config API as experimental * Imported Translations from Transifex * Allow methods to be carried in Fernet tokens * Federated token formatter * Refactor: make Fernet token creation/validation API agnostic * Convert audit_ids to bytes * Drop Fernet token prefixes & add domain-scoped Fernet tokens * Add JSON schema validation for service providers * Implements whitelist and blacklist mapping rules * Adding utf8 to federation tables * Eventlet green threads not released back to pool * Abstract the direct map concept into an object * Remove redundant creation timestamp from fernet tokens * Fix deprecated group for eventlet_server options * Sync oslo-incubator to f2cfbba * Cleanup test keeping unnecessary fixture references * Fix typo in name of variable in resource router * Add test to list projects by the parent_id * Fixes minor spelling issue * Crosslink to other sites that are owned by Keystone * Imported Translations from Transifex * move region and service exist checks into manager layer * make credential policy check ownership of credential * Remove unused threads argument * Refactor: remove dep on trust_api / v3 token helper * Enable use of database domain config * add oauth authentication to config file * Prevent calling waitall() inside a GreenPool's greenthread * Rename get_events to list_events on the Revoke API * Address nits for default cache time more explicit * add cadf notifications for oauth * Add scope info to initiator data for CADF notifications * Removed maxDiff attribute from TestCase * Refactoring: use BaseTestCase instead of TestCase * Moved sys.exit mocking into BaseTestClass * Refactor: move initiator test to cadf specific section * Refactor: create a common base for notification tests * Migrations squash * Consistently use oslo_config.cfg.CONF * Removes logging code 
that supported Python <2.7 * Refactoring: removed client method from TestCase * Refactoring: remove self._config_file_list from TestCase * Deprecate passing "extras" in token data * 'Assignment' has no attr 'get_domain_by_name' * Refactor: make extras optional in v3 get_token_data * Remove extra semicolon from mapping fixtures * Imported Translations from Transifex * Fix seconds since epoch use in fernet tokens * Add API support for domain config * Remove unused checkout_vendor * Move test_core to keysteone.tests.unit.tests * Fixes the SQL model tests * Add documentation for key terms and basic authenticating * Remove useless comment from requirements.txt * Move pysaml to requirements.txt for py3 * Docstring fixes in fernet.token_formatters * Made project_id required for ec2 credential * Add Federation mixin for setting up data * Refactor: remove token formatters dep on 'token_data' on create() * Refactor: rename the "standard" token formatter to "scoped" * Add unscoped token formatter for Fernet tokens * Fix the wrong order of parameters when using assertEqual * Imported Translations from Transifex * Spelling and grammar cleanup * Fixes bug in SQL/LDAP when honoring driver_hints * Remove policy parsing exception * Cleanup policy related tests * Remove incubated version of oslo policy * Use oslo.policy instead of incubated version * Fixes minor whitespace issues * Updated from global requirements * Add checking for existing group/option to update domain config * Stop debug logging of Ldap while running unit tests * Exposes bug in SQL/LDAP when honoring driver_hints * Updated from global requirements * Fix typos in tests/unit/core.py * Remove unnecessary import * Update developer docs landing page * Add support for whitelisting and partial domain configs * Change headers to be byte string friendly * fix import order in federation controller * Imported Translations from Transifex * Fix a minor coding nit in Fernet testing * Move install of cryptography before six * 
refactor: extract and document audit ID generation * Update sample config file * log query string instead of openstack.params and request args * Cleanup docstrings in test_v3_federation.py * refactor: consistently refer to "unpacked tokens" as the token's "payload" * refactor: extract fernet packing & unpacking methods * Fix nits from 157495 * Deprecate Eventlet Deployment in favor of wsgi containers * remove old docstr referring to keyczar * Implement backend driver support for domain config * Use revocation events for lightweight tokens * Avoid multiple instances for a provider * Always load revocation manager * Cleanup comments from 159865 * Updated from global requirements * Rename "Keystone LightWeight Tokens" (KLWT) to "Fernet" tokens * Make the default cache time more explicit in code * Keystone Lightweight Tokens (KLWT) * Refactor and provide scaffolding for domain specific loading * Populate token with service providers * Add CADF notifications for trusts * Get initiator from manager and send to controller * Add in non-decorator notifiers * Implemented caching in identity layer * Imported Translations from Transifex * Use dict comprehensions instead of dict constructor * Remove deprecated methods and functions in token subsystem * Authenticate local users via federated workflow * Move UserAuthInfo to a separate file * Make RuleProcessor._UserType class public * Enhance user identification in mapping engine * Remove conditional check (and test) for oauth_api * Fixes test_multiple_filters filters definition * Remove conditionals that check for revoke_api * Use correct dependency decorator * Add minimum release support notes for federation * Update `os service create` examples in config services * Reference OSC docs in CLI examples * Chain a trust with a role specified by name * Add parent_id to test_project_model * Revamp the documentation surrounding notifications * Remove unused tmp directory in tests * Correct initialization order for logging to use 
eventlet locks * add missing links for v3 OS-EC2 API response * Remove explicit mentions of JSON from test_v2 * Rename test_keystoneclient* * Rename test_content_types * Fix for KVS cache backend incompatible with redis-py * Enable endpoint_policy, endpoint_filter and oauth by default * Add links to extensions that point to api specs * Classifying extensions and defining process * Imported Translations from Transifex * Add oslo request id middleware to keystone paste pipeline * Uses SQL catalog driver for v2 REST tests * Fixed skip msg in templated catalog test * Remove invalid comment/statement at role manager * Standardize notifications types as constants * Change use of random to random.SystemRandom * Remove extra call to oauth manager from tests * Remove an extra call to create federation manager * Updated from global requirements * Imported Translations from Transifex * Improve List Role Assignment Tests * Enable filtering in LDAP backend for listing entities * Refactor filter and sensitivity tests in prepartion for LDAP support * Imported Translations from Transifex * Provide additional detail if OAuth headers are missing * Add WebSSO support for federation * Check consumer and project id before creating request token * Regenerate sample config file * Move eventlet server options to a config section * refactor: use _get_project_endpoint_group_url() where applicable * Update sample config file * Consistently use oslo_config.cfg.CONF * Imported Translations from Transifex * Removes unnecessary checks when cleaning a domain * Remove check_role_for_trust from sample policies * Remove duplicated test for get_role * Add a test for create_domain in notifications * Add CADF notification handling for policy/region/service/endpoint * Publicize region/endpoint/policy/service events * Add CADF notifications for most resources * Updated from global requirements * Drop foreign key (domain_id) from user and group tables * Make federated domain configurable * Imported 
Translations from Transifex * Move backend role tests into their own module * Fix nits from patch #110858 * Fix invalid super() usage in memcache pool * Add a domain to federated users * Wrap dependency registry * Remove unnecessary code setting provider * Fix tests to not load federation manager twice * Fix places where role API calls still called assignment_api * fix a small issue in test_v3_auth.py * Imported Translations from Transifex * rename cls in get_auth_context to self * make tests of endpoint_filter check endpoints num * remove the Conf.signing.token_format option support * Remove list_endpoint_groups_for_project from sample policies * Add get_endpoint_group_in_project to sample policy files * Check for invalid filtering on v3/role_assignments * Remove duplicate token revocation check * Remove incubator version of log and local * Use oslo.log instead of incubator * Move existing tests to unit * Cleanup tests to not set multiple workers * Use subunit-trace from tempest-lib * Log exceptions safely * Imported Translations from Transifex * Refactor _send_audit_notification * Updated from global requirements * Remove excess brackets in exception creation * Update policy doc to use new rule format * remove the unused variables in indentity/core.py * fix assertTableColumns * Imported Translations from Transifex * make federation part of keystone core * Small cleanup of cloudsample policy * Fix error message on check on RoleV3 * Improve creation of expected assignments in tests * Add a check to see if a federation token is being used for v2 auth * Adds a fork of python-ldap for Py3 testing * Updates Python3 requirements * Sync with oslo-incubator * Add local rules in the federation mapping tests * Don't try to convert LDAP attributes to boolean * Add schema for endpoint group * Split the assignments controller * Use _VersionsEqual for a few more version tests * Remove test PYTHONHASHSEED setting * Correct version tests for result ordering * Correct a v3 auth 
test for result ordering * Correct catalog response checker for result ordering * Correct test_get_v3_catalog test for result ordering * Correct test_auth_unscoped_token_project for result ordering * Fix the syntax issue on creating table `endpoint_group` * Change hacking check to verify all oslo imports * Change oslo.i18n to oslo_i18n * Change oslo.config to oslo_config * Change oslo.db to oslo_db * Remove XMLEquals from tests * Remove unused test case * Don't coerce port config values * Make identity id mapping handle unicode * Improve testing of unicode id mapping * Add new "RoleAssignment" exception * Imported Translations from Transifex * log wsgi requests at INFO level * Fix race on default role creation * Imported Translations from Transifex * Unscoped to Scoped only * Refactor federation SQL backend 2015.1.0b2 ---------- * Set initiators ID to user_id * Updated from global requirements * Change oslo.messaging to oslo_messaging * Change oslo.serialization to oslo_serialization * Handle SSL termination proxies for version list * Imported Translations from Transifex * Update federation config to use Service Providers * Drop URL field from region table * Create K2K SAML assertion from Service Provider * Service Providers API for OS-FEDERATION * Implements subtree_as_ids query param * Refactor role assignment assertions * Fixes 'OS-INHERIT:inherited_to' info in tests * During authentication validate if IdP is enabled * Fix typo in Patch #142743 * Make the LDAP dependency clear between identity, resource & assignment * Implements parents_as_ids query param * Internal notifications for cleanup domain * Multiple IDP authentication URL * Change oslo.utils to oslo_utils * Imported Translations from Transifex * Regenerate sample config file * Make unit tests call the new resource manager * Make controllers and managers reference new resource manager * Remove unused pointer to assignment in identity driver * Move projects and domains to their own backend * Make role 
manager refer to role cache config options * Documentation fix for Keystone Architecture * Imported Translations from Transifex * Fix evaluation logic of federation mapping rules * Deprecate LDAP Assignment Backend * Fix up _ldap_res_to_model for ldap identity backend * Remove local conf information from paste-ini * Use RequestBodySizeLimiter from oslo.middleware * Adds a wip decorator for tests * Remove list_user_projects method from assignment * Updated from global requirements * Remove unnecessary code block of exception handling * Updated from global requirements * Add library oslo.concurrency in config-generator config file * Updated from global requirements * Explicit Unscoped * add missing API in docstring of EndpointFilterExtension * fix test_ec2_list_credentials * Assignment sql backend create_grant refactoring * Updated from global requirements * Imported Translations from Transifex * Remove TODO comment which has been addressed * Refactor keystone-all and http/keystone * Updated from global requirements * Identify groups by name/domain in mapping rules * do parameter check before updating endpoint_group * Move sql specific filter test code into test_backend_sql * Fix incorrect filter test name * Update the keystone sample config * Minor fix in RestfulTestCase * Scope federated token with 'token' identity method * Correct comment about circular dependency * Refactor assignment manager/driver methods * Make unit tests call the new, split out, role manager * Make controllers call the new, split out, role manager * Correct doc string for grant driver methods * Split roles into their own backend within assignments * correct the help text of os_inherit * Update Inherited Role Assignment Extension section * Limit lines length on configuration doc * Fixes spacing in sentences on configuration doc * Fixes several typos on configuration doc * Trust redelegation * add missing parent_id parameter check in project schema * Fix incorrect session usage in tests * Fix 
migration 42 downgrade * Updated from global requirements * Additional test coverage for password changes * Fix downgrade test for migration 61 on non-sqlite * Fix transaction issue in migration 44 downgrade * Correct failures for H238 * Move to hacking 0.10 * Updated from global requirements * Remove unused fields in base TestCase * Keystoneclient tests from venv-installed client * Fix downgrade from migration 61 on non-sqlite * explicit namespace prefixes for SAML2 assertion * Remove requirements not needed by oslo-incubator modules anymore * Remove unused testscenarios requirement * Cleanup test-requirements for keystoneclient * Fix tests using extension drivers * Ensure manager grant methods throw exception if role_id is invalid * update sample conf using latest oslo.conf * Remove unnecessary oslo incubator bits * let endpoint_filter sql backend return dict data * Tests fail only on deprecation warnings from keystone * switch from sample_config.sh to oslo-config-generator * Add positive test case for content types * Update the keystone.conf sample * remove invalid note * invalidate cache when updating catalog objects * Enable hacking rule H302 * fix wrong self link in the response of endpoint_groups API * Imported Translations from Transifex * improve the EP-FILTER catalog length check in test_v3.py * Don't allow deprecations during testing * Fix to not use deprecated Exception.message * Integrate logging with the warnings module * rename oslo.concurrency to oslo_concurrency * Fix to not use empty IN clause * Be more precise with flake8 filename matches * Use bashate to run_tests.sh * Move test_utils to keystone/tests/unit/ * add circular check when updating region * fix the wrong update logic of catalog kvs driver * Removes a Py2.6 version of assertSetEqual * Removes a Py2.6 version of inspect.getcallargs * Removes a bit of WSGI code converts unicode to str * Expanded mutable hacking checks * Make the mutable default arg check very strict * sync to oslo commit 
1cf2c6 * Update federation docs to point to specs.o.org * Memcache connection pool excess check * Always return the service name in the catalog * Update docs to no longer show XML support 2015.1.0b1 ---------- * Check and delete for policy_association_for_region_and_service * Remove unnecessary ldap import * Rename `removeEvent` to be more pythonic * Fix the way migration helpers check FK names * Remove XML support * Fix modifying a role with same name using LDAP * Add a test for modifying a role to set the name the same * Fix disabling entities when enabled is ignored * Add tests for enabled attribute ignored * Cleanup eventlet use in tests * Fix update role without name using LDAP * Add test for update role without name * Inherited role assignments to projects * Updated from global requirements * Fix inherited user role test docstring * Fixes links in Shibboleth configuration docs * Updated from global requirements * fix wrong indentation in contrib/federation/utils.py * Adds openSUSE support for developer documentation * User ids that begin with 0 cannot authenticate through ldap * Typo in policy call * Updated from global requirements * Remove endpoint_substitution_whitelist config option * Correct max_project_tree_depth config help text * Adds correct checks in LDAP backend tests * Updated from global requirements * Add an identity backend method to get group by name * Create, update and delete hierarchical projects * drop developer support for OS X * Ignore H302 - bug 1398472 * Remove irrelative comment * remove deprecated access log middleware * Multiple IdPs problem * Fixes docstring at eventlet_server * Fix the copy-pasted help info for db_version * Updated from global requirements * TestAuthPlugin doesn't use test_auth_plugin.conf * Add missing translation marker for dependency * Use _ definition from keystone.i18n * Remove Python 2.6 classifier * Correct token flush logging * Speed up memcache lock * Moves hacking tests to unit directory * Fixes 
create_saml_assertion() return * Add import i18n to federation/controllers.py * Correct use of config fixture * Extends hacking check for logging to verify i18n hints * Adds missing log hints for level E/I/W * make sample_data.sh account for the default options in keystone.conf * Adds dynamic checking for mapped tokens * Updated from global requirements * Enable cloud_admin to list projects in all domains * Remove string from URL in list_revoke_events() * Configuring Keystone edits * Update keystone readme to point to specs.o.org * Imported Translations from Transifex * Add WSGIPassAuthorization to OAuth docs * Increase test coverage of test_versions.py * Move test_pemutils.py to unit test directory * Don't return ``user_name`` in mapped.Mapped class * Increase test coverage of test_base64utils.py * Move base64 unit tests to keystone/tests/unit dir * Move injection unit tests to keystone/tests/unit * Move notification unit tests to unit test dir * Allow for REMOTE_USER name in federation mapping * Doc about specifying domains in domains specific backends * Remove useless field passed into SQLAlchemy "distinct" statement * Exclude domains with inherited roles from user domain list * Improve testing of exclusion of inherited roles * Fix project federation tokens for inherited roles * Improve testing of project federation tokens for inherited roles * Fix domain federation tokens for inherited roles * Improve testing of domain federation tokens for inherited roles * Fix misspelling at configuration.rst file * Remove duplicate setup logic in federation tests * Imported Translations from Transifex * Enable hacking rule H904 * Move shib specific documentation * Additional debug logs for federation flows * Add openid connect support * Imported Translations from Transifex * Enable hacking rule H104 File contains nothing but comments * Rename _handle_saml2_tokens() method * Updated from global requirements * Update references to auth_token middleware * Use true() rather than 
variable/singleton * Change ca to uppercase in keystone.conf * default revoke driver should be the non-deprecated driver * Prevent infinite loop in token_flush * Adds IPv6 url validation support * Provide useful info when parsing policy file * Doc about deleting a domain specific backend domain * Updated from global requirements * Remove token persistence proxy * Correct use of noqa * Use oslo.concurrency instead of sync'ed version * revise error message for keystone.token.persistence pkg * Change config option examples to v3 * Sync modules from oslo-incubator * test_utils use jsonutils from oslo.serialization * Add fileutils module * Move check_output and git() to test utils * Remove nonexistant param from docstring * Fixes aggressive use of translation hints * PKI and PKIZ tokens unnecessary whitespace removed * Move unit tests from test_backend_ldap * Use correct name of oslo debugger script * Updated from global requirements * Imported Translations from Transifex * Change /POST to /ECP at federation config * Base methods to handle hierarchical projects * use expected_length parameter to assert expected length * fix the wrong order of assertEqual args in test_v3 * sys.exit mock cleanup * Tests raise exception if logging problem * Correct the code path of implementation for the abstract method * Use newer python-ldap paging control API * Add xmlsec1 dependency comments * Add parent_id field to projects * Add max-complexity to pep8 for Keystone * Remove check_password() in identity.backend.ldap * Restrict certain APIs to cloud admin in domain-aware policy * Remove unused ec2 driver option * Extract Assignment tests from IdentityTestCase * Clean up federated identity audit code * obsolete deployment docs * Remove database setup duplication * Fixes endpoint_filter tests * Fixes a spelling error in hacking tests * Fixes docstrings to be more accurate * Update the feature/hierarchical-multitenancy branch * Updated from global requirements 2014.2 ------ * updated 
translations * Remove deprecated KVS trust backend * Imported Translations from Transifex * Ensure sql upgrade tests can run with non-sqlite databases * Ensure sql upgrade tests can run with non-sqlite databases * Validates controller methods exist when specified * Fixes an error deleting an endpoint group project * Add v3 openstackclient CLI examples * Update the CLI examples to also use openstackclient * Replace an instance of keystone/openstack/common/timeutils * Use importutils from oslo.utils * Use jsonutils from oslo.serialization * Update 'Configuring Services' documentation * Use openstackclient examples in configuration documentation * Validates controller methods exist when specified * Fixes an error deleting an endpoint group project * Switch LdapIdentitySqlAssignment to use oslo.mockpatch * Fix tests comparing tokens * Remove deprecated TemplatedCatalog class * Remove images directory from docs * Remove OS-STATS monitoring * Remove identity and assignment kvs backends * Add an XML code directive to a shibboleth example * revise docs on default _member_ role * Convert unicode to UTF8 when calling ldap.str2dn() * Fix tests comparing tokens * Fix parsing of emulated enabled DN * Handle default string values when using user_enabled_invert * Handle default string values when using user_enabled_invert * Convert unicode to UTF8 when calling ldap.str2dn() * Fix parsing of emulated enabled DN * Add test for getting a token with inherited role * wrong logic in assertValidRoleAssignmentListResponse method * Open Kilo development 2014.2.rc1 ---------- * Enhance FakeLdap to require base entry for subtree search * Imported Translations from Transifex * Uses session in migration to stop DB locking * Address some late comments for memcache clients * Set issuer value to CONF.saml.idp_entity_id * Updated from global requirements * Add placeholders for reserved migrations * Mark k2k as experimental * Add version attribute to the SAML2 Assertion object * New section for 
CLI examples in docs * Fix failure of delete domain group grant when identity is LDAP * Clean up the Configuration documentation * Adding an index on token.user_id and token.trust_id * Update architecture documentation * Fix a spelling mistake in keystone/common/utils.py * Imported Translations from Transifex * Prevent infinite recursion on persistence core on init * Read idp_metadata_path value from CONF.saml * Remove duplicated assertion * Fix create and user-role-add in LDAP backend * Fix minor spelling issues in comments * Add a pool of memcached clients * Update URLs for keystone federation configuration docs * add --rebuild option for ssl/pki_setup * Mock doesn't have assert_called_once() * Do not run git-cloned ksc master tests when local client specified * Add info about pysaml2 into federation docs * Imported Translations from Transifex * Remove unused cache functions from token.core * Updated from global requirements * Safer check for enabled in trusts * Set the default number of workers when running under eventlet * Add the processutils from oslo-incubator * Update 'Configure Federation' documentation * Ensure identity sql driver supports domain-specific configuration * Allow users to clean up role assignments * Adds a whitelist for endpoint catalog substitution * Revoke the tokens of group members when a group role is revoked * Change pysaml2 comment in test-requrements.txt * Document Keystone2Keystone federation * Set LDAP certificate trust options for LDAPS and TLS * Fail on empty userId/username before query * Refactor FakeLdap to share delete code * ldap/core deleteTree not always supported * Reduce unit test log level for notifications * Fix delete group cleans up role assignments with LDAP * Refactor LDAP backend using context manager for connection * Fix fakeldap search_s documentation * Add delete notification to endpoint grouping * Fix using local ID to clean up user/group assignments * Add characterization test for cleanup role assignments for 
group * Fix LDAP group role assignment listing * Correct typos in keystone/common/base64utils.py docstrings * Add V3 JSON Home support to GET / * Ensure a consistent transactional context is used * Adds hint about filter placement to extension docs * Adds pipeline hints to the example paste config * Make the extension docs a top level entry in the landing page * LDAP: refactor use of "1.1" OID * Fix Policy backend driver documentation * improve dependency injection doc strings * Document mod_wsgi doesn't support chunked encoding * Making KvsInheritanceTests use backend KVS * Keystone local authenticate has an unnecessary pending audit record * Use id attribute map for read-only LDAP * Stop skipping LDAP tests * Update the revocation configuration docs * Fixes formatting error in debug log statement * Remove trailing space from string * Update paste pipelines in configuration docs * Update man pages * Updates package comment to be more accurate * Fixed typo 'in sane manner' to 'in a sane manner' * Enable filtering of services by name * correct typos * Fixes code comment to be more accurate * Prevent domains creation for the default LDAP+SQL * Add testcase for coverage of 002_add_endpoint_groups * Fix oauth sqlite migration downgrade failure * Sync jsonutils from oslo-incubator 32e7f0b5 * Imported Translations from Transifex * Avoid conversion of binary LDAP values * Remove unused variable TIME_FORMAT * Add characterization test for group role assignment listing * Fix dn_startswith * Use oslo_debug_helper and remove our own version * Fixes a mock cleanup issue caused by oslotest * Add rst code-blocks to a bunch of missing examples * Capitalize all instances of Keystone in the docs 2014.2.b3 --------- * Update the docs that list sections in keystone.conf * Fixed spelling mistakes in comments * use one indentation style * Fix admin server doesn't report v2 support in Apache httpd * Add test for single app loaded version response * Work toward Python 3.4 support and 
testing * Update the federation configuration docs for saml2 * Add docs for enabling endpoint policy * warn against sorting requirements * Adds region back into the catalog endpoint * Remove extra V3 version router * Implementation of Endpoint Grouping * Fix minor nits for token2saml generation * Routes for Keystone-IdP metadata endpoint * Generate IdP Metadata with keystone-manage * IdP SAML Metadata generator * Implement validation on Trust V3 API * Create SAML generation route and controller * trustor_user_id not available in v2 trust token * Transform a Keystone token to a SAML assertion * Remove TODO that was done * Fix region schema comment * Remove unused _validate_endpoint * Fix follow up review issues with endpoint policy backend patch * controller for the endpoint policy extension * Mark the revoke kvs backend deprecated, for removal in Kilo * Fix logging config twice * Implement validation on the Catalog V3 API * General logging cleanup in keystone.notifications * Lower log level for notification registration * backend for policy endpoint extension * Implement validation on Credential V3 * Implement validation on Policy V3 API * Fix token flush fails with recursion depth exception * Spelling errors fixed in the comments * Add index for actor_id in assignments table * Endpoint table is missing reference to region table * add missing log hints for level C/E/I/W * Add audit support to keystone federation * Add string id type validation * Implement validation on Assignment V3 API * Adds tests that show how update with validation works * Mark the trust kvs backend deprecated, for removal in Kilo * Test cleanup: do not leak FDs during test runs * Do not load auth plugins by class in tests * JSON Home data is required * Cleanup superfluous string comprehension and coersion * Add commas for ease of maintenance * Comments to docstrings for notification emit methods * Notification cleanup: namespace actions * Mark kvs backends as deprecated, for removal in Kilo * 
Add bash code style to some portions of configuration.rst * Update sample config * Update tests to not use token_api * Make persistence manager in token_provider_api private * Enhance GET /v3 to handle Accept header * Enhance V3 extensions to provide JSON Home data * Enhance V3 extension class to integrate JSON Home data * Change OS-INHERIT extension to provide JSON Home data * Change the sub-routers to provide JSON Home data * Change V3 router classes to provide JSON Home data * Create additional docs for role assignment events * Add libxmlsec1 as external package dependency on OS X * Add __repr__ to KeystoneToken model * Add extra guarding to revoke_by_audit_id methods * Mark methods on token_api deprecated * Remove SAML2 plugin dependency on token_api * Remove oauth controller dependency on token_api * Remove assignment_api dependency on token_api * Notification Constant Cleanup and internal notify type * Remove wsgi and base controller dependency on token_api * Remove identity_api dependency on token_api * Remove trust dependency on token_api * Update AuthContextMiddleware to not use token_api * Revoke by Audit Id / Audit Id Chain instead of expires * assignment controller error path fix * Make SQL the default backend for Identity & Assignment unit tests * Add CADF notifications for role assignment create and delete * Add notifications for policy, region, service and endpoint * Enhance V3 version controller to provide JSON Home response * Provide the V3 routers to the V3 extension controller * Enhance V3 routers to store basic resource description * Correct the signature for some catalog abstract method signatures * Convert to urlsafe base64 audit ids * Sync Py2 and Py3 requirements files * Sync with oslo-incubator * Add audit ids to tokens * Fixing simple type in comment * Create authentication specific routes * Standardizing the Federation Process * Enable filtering of credentials by user ID * Expose context to create grant and delete grant * Redirect stdout 
and stderr when using subprocess * Back off initial migration to 34 * Back off initial migration to 35 * Use python convention for function names in test_notifications * Use mail for the default LDAP email attribute name * Bump hacking to 0.9.x series * Fixes an issue with the XMLEquals matcher * Do not require method attribute on plugins * Remove _BaseFederationExtension * Add a URL field to region table * Remove unnecessary declaration of CONF * Remove trailing space in tox.ini * Rename bash8 requirement * Updates the sample config * remove unused import * Clean whitespace off token * Support the hints mechanism in list_credentials() * Keystone service throws error on receiving SIGHUP * Remove strutils and timeutils from openstack-common.conf * Use functions in oslo.utils * Add an OS-FEDERATION section to scoped federation tokens * Ensure roles created by unit tests have correct attributes * Update control_exchange value in keystone.conf * swap import order of lxml * add i18n to lxml error * Check for empty string value in REMOTE_USER * Refactor names in catalog backends * Update CADF auditing example to show non-payload information * Remove ec2 contrib dependency on token_api * Expose token revocation list via token_provider_api * Remove assignment controller dependency on token_api * Refactor serializer import to XmlBodyMiddleware * Delete intersphinx mappings * Fix documentation link * Make token_provider_api contain token persistence * Remove S3 middleware tests from tox.ini * Remove unused function * Add oslo.utils requirement * Surround REMOTE_USER variable name with quotes * Remove `with_lockmode` use from Trust SQL backend * Allow LDAP lock attributes to be used as enable attributes * Improve instructions about federation * Do not override venvs * Imported Translations from Transifex * Remove debug CADF payload for every authN request * Don't override tox envdir for pep8 and cover jobs * Change V3 extensions to use resources * Enhance V3 extension class 
to use resources * V3 Extension class * Change V3 router classes to use resources * Enhance V3 router class for resources * Class for V3 router packages * Filter List Regions by 'parent_region_id' * Refactor existing endpoint filter tests * Trust unit tests should target additional threat scenarios * Update the config file * Fix revocation event handling with MySQL * Set default token provider to UUID * Add filters to the collections 'self' link * Issue multiple SQL statements in separate engine.execute() calls * Remove fixture from openstack-common.conf * Use config fixture from oslo.config * Fix revoking a scoped token from an unscoped token * Updated from global requirements * KeyError instead of exception.KeyError * Catch correct oslo.db exception * Update setup docs with Fedora 19+ dependencies * Add a test for revoking a scoped token from an unscoped * Fix revoking domain-scoped tokens * Correct revocation event test for domain_id * Add pluggable range functions for token flush * Configurable python-keystoneclient repo * Fix invalid self link in get access token * Add workaround to support tox 1.7.2 * Fixes a capitalization issue * Do not consume trust uses when create token fails * Refactor set domain-id and mapping code * Remove duplicated asserts * Fix for V2 token issued_at time changing * Add tests related to V2 token issued_at time changing * Sample config update * Add the new Keystone TokenModel * Add X-Auth-Token header in federation examples * Check url is in the 'self' link in list responses * Clean up EP-Filter after delete project/endpoint * add internal delete notification for endpoint * remove static files from docs * Move token persistence classes to token.persistence module * cache the catalog * Disable a domain will revoke tokens under the same domain * Sqlite files excluded from the repo * Adding support for ldap connection pooling * Details the proper way to call a callable 2014.2.b2 --------- * Add the new oslo.i18n as a dependency for 
Python 3 * Fixes test_exceptions.py for Python3 * Fixes test_wsgi for Python3 * Adds several more test modules that pass on Py3 * Reduces the amount of mocked imports for Python 3 * Disables LDAP unit tests * Updated from global requirements * Initial implementation of validator * Mark the 'check_vX_token' methods deprecated * Extracting get group roles for project logic to drivers * implement GET /v3/catalog * Adds coverage report to py33 test runs * Fixed tox cover environment to share venv * Regenerate sample config file * Check that region ID is not an empty string * auth tests should not require admin token * Example JSON files should be human-readable * Consolidate `assert_XXX_enabled` type calls to managers * Move keystone.token.default_expire_time to token.provider * Move token_api.unique_id to token_provider_api * Capitalize a few project names in configuring services doc * Fixes a Python3 syntax error * Introduce pragma no cover to asbtract classes * Update middleware that was moved to keystonemiddleware * Sync with oslo-incubator * project disabled/deleted notification recommendations * render json examples with syntax highlighting * Use oslo.i18n * Make sure unit tests set the correct log levels * Clean up the endpoint filtering configuration docs * Avoid loading a ref from SQL to delete the ref * Add revocation extension to default pipeline * multi-backend support for identity * Update docs to reflect new db_sync behaviour * Migrate default extensions * Add oslo.i18n as dependency * Do not use lazy translation for keystone-manage * Update the configuration docs for the revocation extension * Remove deprecated token_api.list_tokens * Imported Translations from Transifex * Add keystonemiddleware to requirements * Add _BaseFederationExtension class * Correct the region table to be InnoDB and UTF8 * HEAD responses should return same status as GET * Updated from global requirements * Sync with oslo-incubator e9bb0b59 * Add schema check for OS-FEDERATION 
mapping table * Make OS-FEDERATION core.Driver methods abstract * update example with a status code we actually use * Correct docstring for assertResponseSuccessful * Fix the section name in CONTRIBUTING.rst * Fix OAuth1 to not JSON-encode create access token response * Ending periods in exception messages deleted * Ensure that in v2 auth tenant_id matches trust * Add identity mapping capability * Do not use keystone's config for nova's port * Fix docs and scripts for pki_setup and ssl_setup * LDAP: Added documentation for debug_level option * Updated from global requirements * Fixes the order of assertEqual arguments * remove default=None for config options * Fix test for get_*_by_name invalidation * Do not support toggling key_manglers in cache layer * Implicitly ignore attributes that are mapped to None in LDAP * Move bash8 to run under pep8 tox env * Remove db, db.sqlalchemy from openstack-common.conf * Remove backend_entities from backend_ldap.conf * Consolidate provider calls to token_api.create_token * Adds hacking check for debug logging translations * Updates Python3 requirements to match Python2 * Adds oslo.db support for Python 3 tests * Do not leak SQL queries in HTTP 409 (conflict) * Imported Translations from Transifex * Do not log 14+ INFO lines on a broken pipe error (eventlet) * Regenerate sample config file * deprecate LDAP config options for 'tenants' * the user_tenant_membership table was replaced by "assignment" * Corrects minor spelling mistakes * Ignoring order of user list in TenantTestCase * Make gen_pki.sh & debug_helper.sh bash8 compliant * TestAuthInfo class in test_v3_auth made more efficient * Update docs to reference #openstack-keystone * Don't set sqlite_db default * Migrate ID generation for users/groups from controller to manager * oslo.db implementation * Test `common.sql` initialization * Kerberos as method name * test REMOTE_USER does not authenticate * Document pkiz as provider in config * Only emit disable notifications for 
project/domain on disable * Fix the typo and reformat the comments for the added option * Updated from global requirements * fix flake8 issues * Update sample keystone.conf file * Fix 500 error if request body is not JSON object * Default to PKIZ tokens * Fix a few typos in the shibboleth doc * pkiz String conversion * Fixes catalog URL formatting to never return None * Updates keystone.catalog.core.format_url tests * Ignore broken endpoints in get_catalog * Allow for multiple PKI Style Providers * Add instructions for removing pyc files to docs * Password trunction makes password insecure * enable multiple keystone-all worker processes * Add cloud auditing notification documentation * Block delegation escalation of privilege * Fixes typo error in Keystone * Add missing docstrings and 1 unittest for LDAP utf-8 fixes * Properly invalidate cache for get_*_by_name methods * Make sure domains are enabled by default * Convert explicit session get/begin to transaction context 2014.2.b1 --------- * remove unnecessary word in docs: 'an' * add docs on v2 & v3 support in the service catalog * Add v3 curl examples * Use code-block for curl examples * Sync service module from oslo-incubator * remove unneeded definitions of Python Source Code Encoding * gitignore etc/keystone/ * Enforce ``saml2`` protocol in Apache config * install gettext on OS X for msgfmt * Use translation hints * Add v2 & v3 API documentation * Make sure all the auth plugins agree on the shared identity attributes * update release support warning for domain-specific drivers * Catalog driver generates v3 catalog from v2 catalog * Compressed Token Provider * document keystone-specs instead of LP blueprints in README * fixed several pep8 issues * Invalid command referenced in federation documentation * Fix curl example refs in docs * pep8: do not test locale files * Consistenly use jsonutils instead of json * Fix type error message in format_url * Updated from global requirements * remove out of date docs for 
Fedora 15 * Make sure scoping to the project of a disabled domain result in 401 * document pki_setup and ssl_setup in keystone.conf.sample * Fixed wrong behavior when updating tenant or user with LDAP backends * Cleanup openstack-common.conf and sync from olso * recommend excluding 35357 from ephemeral ports * Fixes duplicated DELETE queries on SQL backends * Refactor tests regarding required attributes * Suggest users to remove REMOTE_USER from shibd conf * Refactor driver_hints * Imported Translations from Transifex * Code which gets and deletes elements of tree was moved to one method * indicate that sensitive messages can be disabled * Check that the user is dumb moved to the common method * Fix spelling mistakes in docs * Replace magic value 'service/security' in CadfNotificationWrapper * Replace assertTrue and assertFalse with more suitable asserts * replaced unicode() with six.text_type() * Remove obsolete note from ldap * install from source docs never actually install the keystone service * LDAP fix for get_roles_for_user_and_project user=group ID * Cleanup of ldap assignment backend * Remove all mostly untranslated PO files * Mapping engine does not handle regex properly * SQL fix for get_roles_for_user_and_project user=group ID * Unimplemented get roles by group for project list * sql migration: ensure using innodb utf8 for assignment table * Update mailmap entry for Brant * Reduce log noise on expired tokens * Add note for v3 API clients using auth plugin docs * Refactor test_auth trust related tests * Add detailed federation configuration docs * remove a few backslash line continuations * Reduce excess LDAP searches * Regenerate sample config * Fix version links to docs.openstack.org * Add mailmap entry * Refactor create_trust for readability * Adds several more tests to the Python 3 test run * Fixed the policy tests in Python 3 * Fixed the size limit tests in Python 3 * fixed typos found by RETF rules in RST files * Remove the configure portion of 
extension docs * Ensure token is a string * Fixed some typos throughout the codebase * Allow 'description' in V3 Regions to be optional * More random values for oAuth1 verifier * Add rally performance gate job for keystone * Set proper DB_INIT_VERSION on db_version command * Escape values in LDAP search filters * Migration DB_INIT_VERSION in common place * Redundant unique constraint * Correct `nullable` values in models and migrations * Move hacking code to a separate fixture * Some methods in ldap were moved to superclass * Sync with oslo-incubator 28fba9c * Use oslo.test mockpatch * Check that all po/pot files are valid * No longer allow listing users by email * Refactor notifications * Add localized response test * Refactor service readiness notification * Make test_revoke expiry times distinct * Removed duplication with list_user_ids_for_project * Fix cache configuration checks * setUp must be called on a fixture's parent first * First real Python 3 tests * Make the py33 Jenkins job happy * Fix the "search for sql.py" files for db models * Sync with oslo-incubator 74ae271 * no one uses macports * Updated from global requirements * Compatible server default value in the models * Explicit foreign key indexes * Added statement for ... if ... 
else * Imported Translations from Transifex * Ignore broken endpoints in get_v3_catalog * Fix typo on cache backend module * Fix sql_upgrade tests run by themselves * Discourage use of pki_setup * add dependencies of keystone dev-enviroment * More efficient DN list for LDAP role delete * Stronger assertion for test_user_extra_attribute_mapping * Refactor test_password_hashed to the backend testers * Remove LDAP password hashing code * More notification unit tests * Add missing import, remove trailing ":" in middleware example * Fixes for in-code documentation * Isolate backend loading * Sync with oslo-incubator 2fd457b * Adding one more check on project_id * Moves test database setup/teardown into a fixture * Make the LDAP debug option a configurable setting * Remove unnecessary dict copy * More debug output for test * Code which gets elements of tree in ldap moved to a common method * Removed unused code * Don't re-raise instance * Fix catalog Driver signatures * Include extra attributes in list results * Allow any attributes in mapping * Enhance tests for user extra attribute mapping * Fix typo of ANS1 to ASN1 * Updated from global requirements * Refactor: moved flatten function to utils * Collapse SQL Migrations * Treat LDAP attribute names as case-insensitive * replace word 'by' with 'be' * Configurable token hash algorithm * Adds style checks to ease reviewer burden * Adding more descriptive error message * Fixed wrong behavior in method search_s in BaseLdap class * Fix response for missing attributes in trust * Refactor: move federation functions to federation utils * List all forbidden attributes in the request body * Convert test_backend_ldap to config fixture * Add tests for user ID with comma * Fix invalid LDAP filter for user ID with comma * Remove assignment proxy methods/controllers * Remove legacy_endpoint_id and enabled from service catalog * Replace all use of mox with mock * Fix assertEqual arguments order(catalog, cert_setup, etc) * Remove 
common.V3Controller.check_required_params() method * Fix parallel unit tests keystoneclient partial checkout * Sync from oslo db.sqlalchemy.migration * Removes unused db_sync methods * Removes useless wrapper from manager base class * Cleanup of test_cert_setup tests * Sanitizes authentication methods received in requests * Fix create_region_with_id raise 500 Error bug * For ldap, API wrongly reports user is in group * support conventional domain name with one or more dot * Remove _delete_tokens function from federation controller * Keystone doesn't use pam * Fixed small capitalization issue * Fix Jenkins translation jobs * Removes some duplicate setup from a testcase * Updated from global requirements * Enable concurrent testing by default * Cleanup ldap tests (mox and reset values) * Check domain_id with equality in assignment kvs * Moves database setup/teardown closer to its usage * Cleanup config.py * Clean up config help text * Imported Translations from Transifex * test_v3_token_id correctly hash token * Safer noqa handling * Remove noqa form import _s * Fix assertEqual arguments order(auth_plugin, backend, backend_sql, etc) * Expand the use of non-ascii values in ldap test * Properly handle unicode & utf-8 in LDAP * Refactor LDAP API * Use in-memory SQLite for sql migration tests * Use in-memory SQLite for testing * Remove extraenous instantiations of managers * Make service catalog include service name * Add placeholders for reserved migrations 2014.1.rc1 ---------- * Open Juno development * Enable lazy translations in httpd/keystone.py * Avoid using .values() on the indexed columns * Imported Translations from Transifex * revert deprecation of v2 API * Remove unnecessary test setUps * code hygiene; use six.text_type, escape regexp's, use key function * Use CMS to generate sample tokens * Allows override of stdout/stderr/log capturing * exclude disabled services from the catalog * refactor AuthCatalog tests * Rename keystone.tests.fixtures * Change the 
default version discovery URLs * Remove extra cache layer debugging * Updated from global requirements * Fix doc build errors with SQLAlchemy 0.9 * Sync oslo-incubator db.sqlalchemy b9e2499 * Create TMPDIR for tests recursively * Always include 'enabled' field in service response * test tcp_keepidle only if it's available on the current platform * Add dedicated URL for issuing unscoped federation tokens * Cleanup revocation query * Reduce environment logging * Use assertIsNone when comparing against None * Removes the use of mutables as default args * Add a space after the hash for block comments * Filter SAML2 assertion parameters with certain prefix * Use assertIn in test_v3_catalog * Add support for parallel testr workers in Keystone * is_revoked check all viable subtrees * update sample conf * explicitly import gettext function * expires_at should be in a tuple not turned into one * Comparisons should account for instantaneous test execution * Start using to oslotest * Uses generator expressions instead of filter * Remove unused db_sync from extensions * Ability to turn off ldap referral chasing * Add user_id when calling populate_roles_for_groups * Store groups ids objects list in the OS-FEDERATION object * Make domain_id immutable by default * Do not expose internal data on UnexpectedError * Use oslo db.sqlalchemy.session.EngineFacade.from_config * Uses explicit imports for _ * Rename scope_to_bad_project() to test_scope_to_bad_project() * Make LIVE Tests configurable with ENV * Filter out nonstring environment variables before rules mapping * Provide option to make domain_id immutable * Replace httplib.HTTPSConnection in ec2_token * Move test .conf files to keystone/tests/config_files * Removal of test .conf files * Don't automatically enable revocation events * Ensure v3policysample correctly limits domain_admin access * Sync db, db.sqlalchemy from oslo-incubator 0a3436f * Do not use keystone.conf.sample in tests * Filter LDAP dumb member when listing role 
assignments * Updated from global requirements * Remove unnecessary oauth1.Manager constructions * Enforce groups presence for federated authn * Update sample config * Very minor cleanup to default_fixtures * Cleanup keystoneclient tests * Cleanup fixture data added to test instances * Cleans up test data from limit tests * Cleanup of instance attrs in core tests * Cleanup backends after each test * Fixup region description uniqueness * Add slowest output to tox runs (testr) * Add missing documentation for enabling oauth1 auth plugin * Add missing documentation for enabling federation auth plugin * Use class attribute to represent 'user' and 'group' * Configurable temporary directory for tests * Call an existing method in sync cache for revoke events * Remove unnecessary calls to self.config() * remove the unused variable in test_sql_upgrade * remove hardcoded SQL queries in tests * Fix db_version failed with wrong arguments * Use config fixture * Fix docstrings in federation related modules * Sync db, db.sqlalchemy, gettextutils from oslo-incubator 6ba44fd * V3 xml responses should use v3 namespace * trust creation allowed with empty roles list * Fix test_provider_token_expiration_validation transient failure * Fix include only enabled endpoints in catalog * Add unit tests for disabled endpoints in catalog 2014.1.b3 --------- * Update ADMIN_TOKEN description in docs * Mark revoke as experimental * Import order is fixed * Remove unused function from tests * Add OS-OAUTH1 to consumers links section * Don't need session.flush in context managed by session * Imported Translations from Transifex * allow create credential with the system admin token * Stop gating on up-to-date sample config file * Always include 'enabled' field in endpoint response * Add the last of the outstanding helpstrings to config * Token Revocation Extension * Remove vim headers * Removes use of timeutils.set_time_override * drop key distribution from icehouse * Limited use trusts * Update curl 
api example to specify tenant * Update Oslo wiki link in README * Properly configure OS-EP-FILTER test backend * Add tests for endpoint enabled * Remove the un-used and non-maintained PAM identity backend * Remove paste_deploy from test_overrides.conf * SQLAlchemy Change to support more strict dialect checking * Remove "test-only" pam config options * Imported Translations from Transifex * Fix get project users when no user exists * deprecate XML support in favor of JSON * Lazy gettextutils behavior * Fix the order of assertEqual arguments(keystoneclient, kvs, etc) * Update Oslo wiki link in README * Removes a redundant test * Remove unused variable * Implement V3 Specific Version of EC2 Contrib * revocation_list only call isotime on datetime objects * Support authentication via SAML 2.0 assertions * Fix table name typo in test_sql_upgrade * Cleanup and add more config help strings * Ensure v2 API only returns projects in the default domain * Support for mongo as dogpile cache backend * v3 endpoint create should require url * Fix issue with DB upgrade to assignment table * Remove duplicated cms file * oauth1 extension migration fails with DB2 * Handle exception messages with six.text_type * Remove common.sql.migration * Unimplemented error on V3 get token * Updated from global requirements * Replace assertEqual(None, *) with assertIsNone in tests * Fix keystone-manage db_version * Fix assertEqual arguments order(_ldap_tls_livetest, backend_kvs, etc) * Fix assertEqual arguments order(backend_ldap, cache, v3_protection) * Fix the order of assertEqual arguments(v3_auth, v3_identity) * Move _BaseController to common/controllers.py * Remove oslo rpc * Fix webob.exc.HTTPForbidden parameter miss * Remove redundant default value None for dict.get * Remove oslo notifier * Uses the venv virtualenv for the pep8 command * Sync db.exception from Oslo * Update oslo-incubator log.py to a01f79c * Update man pages * Add tests for create grant when no group * Add tests for create 
grant when no user * Correct a docstring in keystone.common.config * Enable pep8 test against auto-generated configuration * Update config options with helpstrings and generate sample * Keystone doc has wrong keystone-manage command * Fix assertEqual arguments order * strengthen assertion for unscoped tokens * Remove sql.Base * Always hash passwords on their way into the DB * bad config user_enable_emulation in mask test * Convert Token Memcache backend to new KeyValueStore Impl * Implement mechanism to provide non-expiring keys in KVS * Rationalize the Assignment Grant Tables * Add version routes to KDS * Keystone team uses #openstack-keystone now * Adds model mixin for {to,from}_dict functionality * Adds Cloud Audit (CADF) Support for keystone authentication * Use class attribute to represent 'project' * Switch over to oslosphinx * Replace notifier with oslo.messaging * Clean StatsController unnecesary members * Use global to represent OS-TRUST:trust * Additional notifications for revocations * add policy entries for /v3/regions * Use Oslo.db migration * `find_migrate_repo` improvement * Variable 'domain_ref' referenced before assignment * Cleanup Dogpile KVS Memcache backend support * Fix test_provider_token_expiration_validation transient failure * Restructure KDS options to be more like Keystone's options * Setup code for auto-config sample generation * Correct `find_migrate_repo` usage * Make live LDAP user DN match the default from devstack * Set sensible default for keystone's paste * Treat sphinx warnings as errors * Use WebOb directly in ec2_token middleware * Add lockfile and kombu as requirements for keystone * Move filter_limit_query out of sql.Base * List trusts, incorrect self link * LDAP: document enabled_emulation * Remove s3_token functional tests * Provide clearer error when deleting enabled domain * Remove copyright from empty files * Syncing policy engine from oslo-incubator * Rename Openstack to OpenStack * Refactor get role for trust * KDS 
fix documented exception * Cleanup oauth tests * Correctly normalize consumer fields on update * Add tests for oauth consumer normalize fields * Adds a fixture for setting up the cache * Clean up database fixtures * Fixes bug in exception message generation * reverse my preferred mailmap * Notifications upon disable * Move identity logic from controller to manager * Changing testcase name to match our terminology * Allow specifying region ID when creating region * explicitly expect hints in the @truncated signature * list limit doc cleanup * Correct error class in find_migrate_repo * Remove unnecessary check to see if trustee exists * Enforce current certificate retrieval behaviour * Use WebOb directly for locale testing * Cleanup KDS doc build errors * Adds rule processing for mapping * Add in functionality to set key_mangler on dogpile backends * Fix indentation issue * Cleanup invalid token exception text * Limit calls to memcache backend as user token index increases in size * Style the code examples in docs as python * Fixes a misspelling * Doc - Keystone configuration - moving RBAC section * Doc - Detailing objects' attributes available for policy.json * Do not use auth_info objects for accessing the API * Remove unused method _get_domain_id_from_auth * Remove unused method _get_domain_conf * Remove unused method _store_protocol * Remove tox locale overrides * Remove unused methods from AuthInfo * Remove unused method _create_metadata * Add test for list project users when no user * Fix assignment KVS backend to not use identity * Update kvs assignment backend docs * Don't skip tests for some bugs * Update oslo-incubator fixture to 81c478 * Remove vim header * revise example extension directory structure * Deprecate s3_token middleware * Update requirements to 661e6 * Implement list limiting support in driver backends * Fix misspellings in keystone * Removes use of fake_notify and fixes notify test * Remove host from per notification options * Document 
priority level on Keystone notifications * Remove default_notification_level from conf * Mock sys.exit in testing * Remove auth_token middleware doc * Move v3_to_v2_user from manager to controller * Update db.sqlalchemy.session from oslo-incubator 018138 * Adds tcp_keepalive and tcp_keepidle config options * Ensure mapping rule has only local and remote properties * clean up keystone-manage man page * Refactor tests move assertValidErrorResponse * fix grammar error in keystone-manage.rst * Add rules to be a required field for mapping schema * Cleanup docstrings * Do not call deprecated functions * Removes useless string * Removes duplicate key from test fixtures * Fixes a Python3 syntax error using raise * Uses six.text_type instead of unicode * Uses six.iteritems for Python3 compat * Add tests to ensure additional remote properties are not validated * Removes xrange for Python3 compat * Cleanup sample config * Change 'oauth_extension' to 'oauth1_extension' * Modified keystone endpoint-create default region * Load the federation manager * Fix indentation errors found by Pep8 1.4.6+ * Mark strings for translation in ldap backends * Remove unused variable assignment * Sync oslo's policy module * Replace urllib/urlparse with six.moves.* * Change Continuous Integration Project link * Remove legacy diablo and essex test cruft * Refactor Auth plugin configuration options * Use self.opt_in_group overrides * Federation IdentityProvider filter fields on update response * Remove unnecessary test methods * Refactor federation controller class hierarchy * Refactor mutable parameter handling * Avoid use of str() with exceptions * Use message when creating Unauthorized exception * Make error strings translatable * Enhancing tests to check project deletion in Active Directory * Add required properties field to rules schema * Fix assignment to not require user or group existence * deprecate access log middleware * remove access log middleware from the default paste pipeline * 
deprecate v2.0 API in multiple choice response * cleaned up extension development docs * Add a docstring and rename mapping tests * Remove versionId, versionInfo, versionList from examples * Tests initialize database * Don't set default for a nullable column * Remove autoincrement from String column * Fix docstrings in federation controller * Change assertTrue(isinstance()) by optimal assert * sync oslo-incubator log.py * turn off eventlet.wsgi debug * Make boolean query filter "False" argument work * Fix list_projects_for_endpoint failed bug * Introduce database functionality into KDS * Update the default_log_levels defaults * Correct sample config default log levels * deprecate stats middleware * Use passed filter dict param in core sql filtering * Fix federation documentation reference * build auth context from middleware * correct the document links in man documents * Use six.text_type to replace unicode * Don't mask the filter built-in * Move sql.Base.transaction * Remove sql.Base.get_session * renamed extensions development doc * Implement filter support in driver backends * append extension name to trust notifications * Allow event callback registration for arbitrary resource types * Fix test_auth isolation * Policy sample - Identity v3 resources management * Tests use setUp rather than init * Improve forbidden checks * Tests remove useless config list cleanup code * use assertEqual instead of assertIs for string comparison * Don't configure on import * Fix reading cache-time before configured * Cleanup eventlet setup * Remove unused variables from common.config * Reference dogpile.cache.memcached backend properly * Unify StringIO usage with six.StringIO * Fix typos in documents and comments * Sync oslo strutils.py * Use six.string_types instead of basestring 2014.1.b2 --------- * Use six to make dict work in Python 2 and Python 3 * initialize environment for tests that call popen * Don't duplicate the existing config file list * Implement notifications for 
trusts * Remove kwargs from trust_api.create_trust * Fixup incorrect comment * Simple Certificate Extension * Add mapping function to keystone * Switch from 400 to 403 on ImmutableAttributeError * Identity Providers CRUD operations * Move KDS paths file * Update comments in test_v3_protection.py * description is wrong in endpoint filter rst doc * Drop unsused "extras" dependency * LDAP Assignment does not support grant v3 API * Adds run_tests.sh cli option to stop on failure * Removes option to delete test DB from run_tests.sh * Removes deprecation warning from run_tests.sh * v3 credentials, ensure blob response is json * Store ec2 credentials blob as json * remove unused LOG * Store trust_id for v3/credentials ec2 keypairs * Refactor context trust_id check to wsgi.Application base class * Implementation of internal notification callbacks within Keystone * Replacing python-oauth2 by oauthlib * Fix using non-default default_domain_id * Enhance auth tests for non-default default_domain_id * KVS support domain as namespace for users * Remove unused member from KVS assignment * Enhance tests for non-default default_domain_id * rename templated.TemplatedCatalog to templated.Catalog * Sync with global requirements * Implements regions resource in 3.2 Catalog API * Reduces memory utilization during test runs * reduce default token duration to one hour * Document running with pdb * Restructure developing.rst * Enable lazy translation * Sync gettextutils from oslo-incubator 997ab277 * derive custom exceptions directly from Exception * Do not append to messages with + * Convert Token KVS backend to new KeyValueStore Impl * Fix sample config external default doc * Documentation cleanup * Make common log import consistent * Remove unused variables * Safe command handling for openssl * Fix external auth (REMOTE_USER) plugin support * Cleanup test_no_admin_token_auth cleanup code * Subclasses of TestCase don't need to reset conf * Cleanup 
test_associate_project_endpoint_extension * Tests use cleanUp rather than tearDown * Remove netifaces requirement * Clean up fakeldap logging * Resolve oauth dependency after paste pipeline is loaded * Change ListOpt default value from str or None to list * Sync oslo-incubator rpc module * Cleanup from business logic refactor * Introduce basic Pecan/WSME framework for KDS * Don't need session.flush in context managed by session * races cause 404 when removing user from project * initialize eventlet for tests * Flush tokens in batches with DB2 * Remove unnecessary line in test_auth * Clean up docstrings in contrib.oauth1.core * Remove unused test function * Remove 'disable user' logic from _delete_domain_contents * Break dependency of base V3Controller on V2Controller * Move deletion business logic out of controllers * Do not update password when updating grants in Assignment KVS * Cleanup of new credential_api delete methods * Enhance list_group_users in GroupApi * Remove noop code * Remove unused imports * Fix typo in test * Fix IPv6 check * Remove unused code in contrib/ec2/controllers.py * Fix use the fact that empty sequences are false * Imported Translations from Transifex * Synchronized with oslo db and db.sqlalchemy * Fix variable passed to driver module * Updated Keystone development install instructions for Ubuntu * Stops file descriptor leaking in tests * Re-write comment for ADMIN_TOKEN * Reduced parameters not used in _populate_user() * Sync several modules from oslo-incubator * Use oslo.db sessions * Switch to oslo-incubator mask_password * Replace xrange in for loop with range * Move Assignment Controllers and Routers to be First Class * Remove Identity and Assignment controller interdependancies * Policy based domain isolation can't be defined * Moves keystoneclient master tests in a new class * Makes the test git checkout info more declaritive * trustee unable to perform role based operations on trust * Cleanup backend loading * Uses oslo's 
deprecated decorator; removes ours * Move endpoint_filter extension documentation * Refactor setup_logging * Fixes documentation building * Create user returns 400 without a password * Fixes the v2 GET /extensions curl example in the documentation * Add assertSetEqual to base test class * Base Implementation of KVS Dogpile Refactor * Sync db.sqlalchemy from oslo-incubator * Fix errors for create_endpoint api in version2 * Fix issues handling trust tokens via ec2tokens API * Fix typo in identity:list_role_assignments policy * Debug env for tox * Updated from global requirements * Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2 * Add ABCMeta metaclass to token provider * token provider cleanup * Sync versionutils from oslo * Cleanup duplication in test_backend * replace "global" roles var names with "all" roles * Remove unused token.valid index * Narrow columns used in list_revoked_tokens sql * Remove roles from OS-TRUST list responses * Remove deprecated code * Sync rpc fix from oslo-incubator * Don't run non-tests * Formalize deprecation of token_api.list_tokens * Add index to cover revoked token list 2014.1.b1 --------- * Refactor assertEqualXML into a testtools matcher * Adds support for username to match the v2 spec * One transaction per call to sql assignment backend * Allow caching to be disabled and tests still pass * Sync From OSLO * Updated from global requirements * Revert "Return a descriptive error message for controllers" * Adds a resource for changing a user's password * Deprecates V2 controllers * Updates .gitignore * Ensure the sample policy file won't diverge * Add pycrypto as a test-requirement * Imported Translations from Transifex * Fix typo in keystone * Added documentation to keystone.common.dependency * Make HACKING.rst DRYer * Allow downgrade for extensions * Try decoding string to UTF-8 on error message fail * Import strutils from oslo * Capture debug logging in tests * Easy testing with alternate keystoneclient * Sync 
log_handler module from Oslo * refactor test_catalog * PasteConfigNotFound also raised when keystone.conf not found * Style improvements to logging format strings * Sync the DB2 communication error code change from olso * Skip test_arbitrary_attributes_* in _ldap_livetest * Add documentation for Read Only LDAP configuration option * Remove deprecated auth_token middleware * Role NoneType object has no attribute setdefault * Utilites for manipulating base64 & PEM * Add memcache options to sample config * UUID vs PKI docs * RST fix for os_inherit example * Rewrites the serveapp method into a fixture * Allow use of rules Policy driver * Return a descriptive error message for controllers * Proxy Assignment from Identity Deprecated * Remove obsolete redhat-eventlet.patch * AuthInfo use dependency injection * Issue unscoped token if user's default project is invalid * Detangle v3 RestfulTestCase setup * Do not name variables as builtins * Updated from global requirements * Removes unused paste appserver instances from tests * Add WSGI environment to context * trusts raise validation error if expires_at is invalid * Fix newly discovered H302 * test attribute update edge cases * Return an error when a non-existing tenant is added to a user * use different bind addresses for admin and public * Sync log module from oslo * Change deprecated CLI arguments * UserAuthInfo use dependency injection * fix unparseable JSON * Duplicate delete the user_project_metadata * Skip test_create_update_delete_unicode_project in _ldap_livetest * don't rebind stdlib's os.chdir function * Dependency cleanup * Moves common RestfulTestCase to it's own module * proxy removed from identity and changed to assignment * Uses fixtures for mox and stubs * Adds fixture package from oslo * Fix KVS create_grant to not raise NotFound if no user/group * Enhance tests for assignment create_grant when no user or group * Clean up duplicate exceptions in docs for assignment.Driver * Remove obsolete driver test 
module * Change sample policy files to use policy language * Documentation on how-to develop Keystone Extensions * Allow delete user or group at same time as role * Enhance tests for delete_grant no user/group * Fix issue deleting ec2-credentials as non-admin user * Remove duplicated code on test_v3_auth * Removes NoModule from the base testcase * Fixes tox coverage command * Update mailmap for Joe Gordon * Add WWW-Authenticate header in 401 responses * Use abstract base class for endpoint_filter driver * Use abstract base class for oauth driver * Use abstract base class for policy driver * Use abstract base class for token driver * Document tox instead of run_tests.sh * Update my mailmap * remove 8888 port in sample_data.sh * Adds decorator to deprecate functions and methods * Move fakeldap to tests * Fix remove role assignment adds role using LDAP assignment * Enhance tests for deleting a role not assigned * Implementation of opt-out from catalog data during token validation * Add external.Base class to external plugins * Add notifications for groups and roles * add IRC channel & wiki link to README * Add python-six to requirements * Fix v2 token user ref with trust impersonation=True * Changes to testr as the test runner * Fixes error messaging * Handle unicode at the caching layer more elegantly * set user_update policy to admin_required * Remove unused DEFAULT_DOMAIN variable * Remove unused config option auth_admin_prefix * Remove unused member * Adds tests for user extra attribute behavior * Adds identity v2 tests to show extra behavior * Treats OS-KSADM:password as password in v2 APIs * Adds more uniformity to identity update_user calls * Don't use default value in LimitingReader * Use abstract base class for auth handler * Use abstract base class for catalog driver * Use abstract base class for credential driver * Use abstract base class for assignment driver * Use abstract base class for trust driver * Use abstract base class for identity driver * remove 
the nova dependency in the ec2_token middleware * Catch the socket exception and log it * Fixes broken doc references * Sync db.sqlalchemy * Handle DB2 disconnect * Fix mysql checkout handler AttributeError * Disable lazy gettext 2013.2.rc1 ---------- * Open Icehouse development * Imported Translations from Transifex * Sync with global requirements * Add tests dir to the coverage omit list * Update tox config * Close the cursor for SQLite for 034 upgrade/downgrade on select * Imports oslo policy to fix test issues * Fixes errors logging in as a user with no password * Fix live LDAP tests * Eliminate type error on search_s * Fix error when create user with LDAP backend * assertEquals is deprecated, use assertEqual (H602) * Validate token calls return 404 on invalid tokens * Protect oauth controller calls and update policy.json * Fix updating attributes with ldap backend * sync oslo policy * Changes v1.1 to v2 for Compute endpoint in sample_data.sh * Update man pages * Update man page version * Sync gettextutils from oslo * only run flake8 once (bug 1223023) * upgrade to oslo.config 1.2 final * Add user to project if project ID is changed * Ensure any relevant tokens are revoked when a role is deleted * Check token_format for default token providers only * Modify oauth1 tests to use generated keystone token in a call * Test for backend case sensitivity * Remove ldap identity domain attribute options * Cleanup of tenantId, tenant_id, and default_project_id * Add extra test coverage for unscoped token invalidation * Monkey patch select in environment * Rewrite README.rst * Enclose command args in with_venv.sh * check for domain existence before doing any ID work * Ensure v2 tokens are correctly invalidated when using BelongsTo * Sync gettextutils from oslo * Use localisation for logged warnings * Fix misused assertTrue in unit tests * oauth using optional dependencies * Rationalize list_user_projects and get_projects_for_user * Optional dependency injection * Include 
new notification options in sample config * fix rst syntax in database schema migrations docs * Ignore H803 from Hacking * Test upgrade migration 16->17 * test token revocation list API (bug 1202952) * Imported Translations from Transifex * gate on H304: no relative imports * Move gettextutils installation in tests to core * Cleanup tests imports so not relative * Tests use "from keystone import tests" * Reduce churn of cache on revocation_list * domain-specific drivers experimental in havana * Fixes for user response with LDAP user_enabled_mask * Close each LDAP connection after it is used, following python-ldap docs * Remove CA key password from cert setup * Import core.* in keystone.tests * Fix incorrect test for list_users * Changed header from LLC to Foundation based on trademark policies * Changes template header for translation catalogs * Support timezone in memcached token backend 2013.2.b3 --------- * Imported Translations from Transifex * Move CA key from certs directory to private directory * OAuth authorizing user should propose roles to delegate * Need to use _() to handle i18n string messages * Fix the code miss to show the correct error messages * Move _generate_paste_config to tests.core * add 'project' notifications to docs * Implement basic caching around assignment CRUD * Update keystone wsgi httpd script for oslo logging * Utilities to create directores, set ownership & permissions * Modify default file/directory permissions * Add a oauth1-configuration.rst and extension section to docs * Update keystone-all man page * Cleanup cache layer tests * Implement caching for Tokens and Token Validation * Document usage notifications * Imported Translations from Transifex * Remove kvs backend from oauth1 extension * Use joins instead of multiple lookups in groups sql * Add project CRUD to assignment_api Manager * Add Memory Isolating Cache Proxy * Enable SQL tests for oauth * Implement decorator-based notifications for users * Use common db model class 
from Oslo * Add common code from Oslo for work with database * Use testtools as base test class * Bump hacking to 0.7 * Removes KVS references from the documentation * Add notifications module * Drop support for diablo to essex migrations * Add 'cn' to attribute_list for enabled_users/tenants query * Implement API protection on target entities * Refactor Token Provider to be aware of expired tokens * Implement Caching for Token Revocation List * Keystone Caching Layer for Manager Calls * Create associations between projects and endpoints * Fixes a link in the documentation * Use correct filename for index & serial file when setting permissions * remove flake8 option from run_tests.sh * Fix role lookup for Active Directory * Clean up keystone-manage man page * change oauth.consumer description into nullable * Use system locale when Accept-Language header is not provided * Fix translate static messages in response * Migrating ec2 credentials to credential * Fix error where consumer is not deleted from sql * add foreign key constraint on oauth tables * Remove a useless arg in range() * Remove enumerate calls * filter in ldap list_groups_for_user * Delete file TODO * use provider to validate tokens * Fix isEnabledFor for compatibility with logging * Ensure username passed by REMOTE_USER can contain '@' * fix the default values for token and password auth * Remove an enumerate call * Add defense in ldap:get_roles_for_user_and_project * remove unused function * Remove Keystone specific logging module * remove refs to keystone.common.logging * Remove User Check from Assignments * Refactor Token Providers for better version interfaces * Remove kwargs from manager calls / general cleanup * Store hash of access as primary key for ec2 type * Add delegated_auth support for keystone * Fix LDAP Identity get user with user_enabled_mask * Fix LDAP Identity with non-zero user_enabled_default * More validation in test_user_enable_attribute_mask * Add test 
test_deleting_project_delete_grants * Cleaned up a few old crufties from README * Clean hacking errors in advance of hacking update * Add unit test to check non-string password support * Assignment to reserved built-in symbol: filter * Implement domain specific Identity backends * Increase length of username in DB * Cleaned up pluggable auth docs * Fix test_user_enable_attribute_mask so it actually tests * Do not skip test_user_enable_attribute_mask in _ldap_livetest * Skip test_create_unicode_user_name in _ldap_livetest * Refactor Keystone to use unified logging from Oslo * Revoke user tokens when disabling/delete a project * Move affirm_unique() in create() to BaseLdap * Move some logic from update() to BaseLdap * Add support for API message localization * Remove unused import * Assignment to reserved built-in symbol: dir * Move 'tests' directory into 'keystone' package * Initial implementation of unified-logging * Sync notifier module from Oslo * Move Babel dependency from test-req to req * Ignore flake issues in build/ directory * update usage in run_test.sh for flake8 * Drop extra credential indexes * Sync models with migrations * Add memcache to httpd doc * Sync unified logging solution from Oslo * Configurable max password length (bug 1175906) * Fix select n+1 issue in keystone catalog * Make pki_setup work with OpenSSL 0.9.x * extension migrations * Create default role on demand * Set wsgi startup log level to INFO * Abstract out attribute_ignore assigning in LDAP driver * Abstract out attribute_mapping filling in LDAP driver * Imported Translations from Transifex * remove swift dependency of s3 middleware * Raise max header size to accommodate large tokens * Clean up use of token_provider manager in tests * add OS-TRUST to links * Run test_mask_password once * Remove kwargs from manager calls where not needed * V3 API need to check mandatory field when creating resources * Use dependency injection for assignment and identity * Handle circular dependencies 
* Clear out the dependency registry between tests * .gitignore eggs * Handle json data when migrating role metadata * Sync DB models and migrations in keystone.assignment.backends.sql * Remove passwords from LDAP queries * use 'exc_info=True' instead of import traceback * Fix typo: Tenents -> Tenants * Use keystone.wsgi.Request for RequestClass * Update references with new Mailing List location * Scipped tests don't render as ERROR's * Implement exception module i18n support * Remove vestiges of Assignments from LDAP Identity Backend * Load backends before deploy app in client tests * default token format/provider handling * Fixing broken credential schema in sqlite * Use assignment_api rather than assignment * Deprecate kvs token backend * Ec2 credentials table not created during testing * Correct Spelling Mistake * Remove an enumerate call * Load app before loading legacy client in tests * Add [assignment].driver to sample config * Deprecation warning for [signing] token_format * Support token_format for backward compatibility * sql.Driver:authenticate() signatures should match * update requires to prevent version cap * Return correct link for effective group roles in GET /role_assignments * Implement Token Binding * Implemented token creation without catalog response * Fix XML rendering with empty auth payload * Pluggable Remote User * grammar fixes in error messages * Implement role assignment inheritance (OS-INHERIT extension) * Implements Pluggable V2 Token Provider * Register Extensions * Implements Pluggable V3 Token Provider * Mixed LDAP/SQL Backend * Clear cached engine when global engine changes * python3: Introduce py33 to tox.ini * Add version so that pre-release versioning works * Sync-up crypto from oslo-incubator * Add crypto dependency * Imported Translations from Transifex * Change domain component value to org from com * Move temporary test files into tests/tmp * Use InnoDB for MySQL * Rationalize how we get roles after authentication in the 
controllers * Python 3.x compatible use of print * Regenerate example PKI after change of defaults * assignment backend * wsgi.BaseApplication and wsgi.Router factories should use **kwargs * Add unittest for keystone.identity.backends.sql Models * Imported Translations from Transifex * Do not create LDAP Domains sub tree * Use oslo.sphinx and remove local copy of doc theme * Move comments in front of dependencies * Remove context from get_token call in normalize_domain_id * Fix issue with v3 tokens and group membership roles * Sync install_venv_common from oslo * Remove a useless arg in range() * Remove an enumerate call * Update paths to pem files in keystone.conf.sample * Don't use deprecated BaseException.message * Add callbacks for set_global_engine * Work without admin_token_auth middleware * Implement GET /role_assignment API call * rename quantum to neutron in docs * Install locales for httpd * DB2 migration support * Use event.listen() instead of deprecated listeners kwarg * Add 'application' to keystone.py for WSGI * Remove hard tabs and trailing whitespace * Manager instead of direct driver * check for constraint before dropping * Stop passing context to managers (bug 1194938) * `tox -ecover` failure. 
Missing entry in tox.ini * Clean up keystone-all.rst * Fix up some trivial license mismatches * Revert environment module usage in middleware * LDAP list group users not fail if user entry deleted * Do not raise NEW exceptions * Move identity ldap backend from directory to file * wsgi.Middleware factory should use **kwargs * Removing LDAP API Shim * Consolidate admin_or_owner rule * Isolate eventlet code into environment * Set default 'ou' name for LDAP projects to Projects * Imported Translations from Transifex * Imported Translations from Transifex * Move user fileds type check to identity.Manager * Http 400 when project enabled is not a boolean * Imported Translations from Transifex * Correct the resolving api logic in stat middleware * Remove a stat warning log * Using sql as default driver for tokens * Correct LDAP configuration doc * Force simple Bind for authentication * Initialize logging from HTTPD * LDAP get_project_users should not return password * Add checks to test if enabled is bool * Fix link typo in Sphinx doc * python WebOb dependency made unpinned * Remove explicit distribute depend * Version response compatible with Folsom * Adds tests for XML version response * Replace openstack-common with oslo in docs * drop user and group constraints * Correct the default name attribute for role * Allow request headers access in app context * Remove how to contribute section in favor of CONTRIBUTING.rst * Fix token purging for memcache for user token index * add ca_key to sample configuration * Commit transaction in migration * Fix internal doc links (bug 1176211) * Missing contraction: Its -> It's (bug 1176213) * Pass on arguments on Base.get_session * Remove bufferedhttp * Move coverage output dir for Jenkins * Check schema when dropping constraints * Import eventlet patch from oslo * Raise key length defaults * Base.get_engine honor allow_global_engine=False * run_tests.sh should use flake8 (bug 1180609) * Ignore the .update-venv directory * Ignore 
conflict on v2 auto role assignment (bug 1161963) * remove_role_from_user_and_project affecting all users (bug 1170649) * Maintain tokens after role assignments (bug 1170186) * split authenticate call * Add db_version command to keystone-manage * Live SQL migration tests * Fix incorrect role assignment in migration * typo in 'import pydev' statement * Fixes a typo * Imported Translations from Transifex * Improve the performance of tokens deletion for user * Revert "Set EVENTLET_NO_GREENDNS=yes in tox.ini." * Disable eventlet monkey-patching of DNS * Fix the debug statement * Document size limits * Add index on valid column of the SQL token Backend * Add KEYSTONE_LOCALEDIR env variable * Add arg to keystone-manage db_sync 2013.2.b1 --------- * Add index on expires column of the SQL token Backend * fix error default policy for create_project * Require keystone-user/-group for pki_setup * Replace assertDictContainsSubset with stdlib ver * separate paste-deploy configuration from parameters * Add missing oslo module * Convert openstack-common.conf to the nicer multiline format * Rename requires files to standard names * Cleanup docstrings (flake8 H401, H402, H403, H404) * imports not in alphabetical order (flake8 H306) * import only modules (flake8 H302) * one import per line (flake8 H301) * eliminate 'except:' (flake8 H201) * consistent i18n placeholders (flake8 H701, H702, H703) * use the 'not in' operator (flake8 H902) * Use TODO(NAME) (flake8 H101) * Remove unnecessary commented out code * Enumerate ignored flake8 H* rules * Migrate to pbr * Remove unused variables (flake8 F841) * Satisfy flake8 import rules F401 and F403 * Test 403 error title * Imported Translations from Transifex * Remove useless private method * Consolidate eventlet code * Use webtest for v2 and v3 API testing * Add missing space to error msg * Imported Translations from Transifex * Read-only default domain for LDAP (bug 1168726) * Add assertNotEmpty to tests and use it * Implement Token Flush 
via keystone-manage * get SQL refs from session (bp sql-query-get) * extracting credentials * Move auth_token middleware from admin user to an RBAC policy * Accept env variables to override default passwords * Http 400 when user enabled is not a boolean * Migrate to flake8 * Fix pyflakes and pep8 in prep for flake8 * Allow backend & client SQL tests on mysql and pg * Revert "Disable eventlet monkey-patching of DNS" * Set EVENTLET_NO_GREENDNS=yes in tox.ini * Disable eventlet monkey-patching of DNS * Revoke tokens on user delete (bug 1166670) * A minor refactor in wsgi.py * Skip IPv6 tests for eventlet dns * LDAP list groups with missing member entry * Fix 403 status response * Remove unused CONF.pam.url * Mark LDAP password and admin_token secret * HACKING LDAP * Make migration tests postgres & mysql friendly * Documentation about the initial configuration file and sample data * Add rule for list_groups_for_user in policy.json * Test listing of tokens with a null tenant * fix duplicate option error * Delete extra dict in token controller * What is this for? 
* Removed unused imports * Remove non-production middleware from sample pipelines * Replace password to "***" in the debug message * Fixed logging usage instead of LOG * Remove new constraint from migration downgrade * Allow additional attribute mappings in ldap * Enable unicode error message * Sync with oslo-incubator copy of setup.py * Set empty element to "" * Fixed unicode username user creation error * Fix token ids for memcached * Use is_enabled() in folsom->grizzly upgrade (bug 1167421) * Generate HTTPS certificates with ssl_setup * Fix for configuring non-default auth plugins properly * test duplicate name * Add TLS Support for LDAP * fix undefined variable * clean up invalid variable reference * Clean up duplicate methods * stop using time.sleep in tests * don't migrate as often * use the openstack test runner * Fix 401 status response * Fix example in documentation * Fix IBM copyright strings * Share one engine for more than just sqlite in-memory * Add missing colon for documentation build steps * Mark sql connection with secret flag 2013.1.rc2 ---------- * Fix test coverage for v2 scoped auth xml response (bug 1160504 * Fix test coverage for v2 scoped auth xml response (bug 1160504) * close db migration session * Use string for port in default endpoints (bug 1160573) * keystone commands don't print any version information * bug 1159888 broken links in rst doc * use the roles in the token when recreating * Sync with oslo-incubator * Rename trust extension (bug 1158980) * Rename trust extension * keystone commands don't print any version information * Imported Translations from Transifex 2013.1.rc1 ---------- * Add a dereference option for ldap * Make versions aware of enabled pipelines * Move trusts to extension * Move trusts to extension * Version bump to 2013.2 * Add a dereference option for ldap * Allow trusts to be optional * Enable emulation for domains * Wrap config module and require manual setup (bug 1143998) * Correct spacing in warning msg * 
Prohibit V3 V2 token intermix for resource in non-default domain (bug 1157430) * Properly handle emulated ldap enablement * Support for LDAP groups (bug #1092187) * Validate domains unconditionally (bug 1130236) * Fix live ldap tests * V2, V3 token intermix for unscoped tokens (bug 1156913) * Pass project membership as dict in migration 015 * Ensure delete domain removes all owned entities * Utilize legacy_endpoint_id column (bug 1154918) * Test default_project_id scoping (bug 1023502) * Fix XML handling of member links (bug 1156594) * Discard null endpoints (bug 1152632) * extracting user and trust ids into normalized fields * No parent exception to wrap * Remove duplicate password/token opts * xml_body returns backtrace on XMLSyntaxError * duplicated trust tests * Migrate roles from metadata to user_project_metadata * Fixes bug 1151747: broken XML translation for resource collections * Revise docs to use keystoneclient.middleware.auth_token * quiet route logging on skipped tests * Ensure tokens are revoked for relevant v3 api calls * Remove un-needed LimitingReader read() function * Catch and log server exceptions * Added test cases to improve LDAP project testing * Switch to final 1.1.0 oslo.config release * Filter out legacy_endpoint_id (bug 1152635) * Improve tests for api protection and filtering * add belongs_to check * Revert "update tests/__init__.py to verify openssl version" * Revert "from tests import" * Make Keystone return v3 as part of the version api * Run keystone server in debug mode * remove spurious roles check * bug 1133526 * Fix folsom -> grizzly role table migration issues (bug 1119789) * Delete tokens for user * from tests import * v3 endpoints won't have legacy ID's (bug 1150930) * return 201 Created on POST request (bug1131119) * add missing attributes for group/project tables (bug1126021) * Remove unused methods from LDAP backed * Move get_by_name to LdapBase * fix typo in kvs backend * mark 2.0 API as stable * unable to load certificate 
should abort request * Move auth plugins to 'keystone.auth.plugins' (bug 1136967) * Change exception raised to Forbidden on trust_id * cleanup trusts in controllers * remove unused import * ports should be ints in config (bug 1137696) * Expand v3 trust test coverage * Trusts * bug 1134802: fix inconsistent format for expires_at and issued_at * Sync timeutils with oslo * Straighten out NotFound raising in LDAP backend * residual grants after delete action (bug1125637) * Remove TODO that didn't land in grizzly * Make getting user-domain roles backend independant * Explain LDAP page_size & default value * Imported Translations from Transifex * Enable a parameters on ldap to allow paged_search of ldap queries This fixes bug 1083463 * update tests/__init__.py to verify openssl version * command line switch for short pep8 output * Convert api to controller * bug 1131840: fix auth and token data for XML translation * flatten payload for policy * Unpin pam dependency version * keystone : Use Ec2Signer utility class from keystoneclient * Move handle_conflicts decorator into sql * domain_id_attributes in config.py have wrong default value * Rework S3Token middleware tests * Remove obsolete *page[_marker] methods from LDAP backend * Setup logging in keystone-manage command * Ensure keystone unittests do not leave CONF.policyfile in bad state * catch errors in wsgi.Middleware * Fix id_to_dn for creating objects * Tests for domain-scoped tokens * domain-scoping * Pass query filter attributes to policy engine * Removed redundant assertion * v3 token API * Update oslo-config version * Correct SQL migration 017 column name * merging in fix from oslo upstream * enabled attribute emulation support * Change the default LDAP mapping for description * Ensure user and tenant enabled in EC2 * Disable XML entity parsing * Remove old, outdated keystone devref docs * Update the Keystone policy engine to the latest openstack common * Implement name space for domains * Update sample_data.sh 
to match docs * project membership to role conversion * Remove test_auth_token_middleware * Workaround Migration issue with PostgreSQL * make LDAP query scope configurable * make fakeldap._match_query work for an arbitrary number of groups * Use oslo-config-2013.1b3 * Remove usage of UserRoleAssociation.id in LDAP * Add an update option to run_tests.sh * Add pysqlite as explicit test dep * fix unit test when memcache middleware is not configured * add missing kvs functionality (bug1119770) * Update to oslo version code * adding additional backend tests (bug1101244) * Fix spelling mistakes * Cleaned up keystone-all --help output * Keystone backend preparation for domain-scoping * Use install_venv_common.py from oslo * Spell accommodate correctly * Missed import for IPv6 tests skip * Add missing log_format, log_file, log_dir opts * Fix normalize identity sql ugrade for Mysql and postgresql * remove duplicate model declaration/attribution * simplify query building logic * Fix test_contrib_s3_core unit test * Expand dependency injection test coverage * remove unneeded config reloading (it's already done during setUp) * allow unauthenticated connections to an LDAP server * Relational API links * return 400 Bad Request if invalid params supplied (bug1061738) * UserApi.update not to require all fields in arg * Tenant update on LDAP breaks if there is no update to apply * Query only attributes strictly required for keystone when using it with existing LDAP servers * Update .coveragerc * Add size validations to token controller * add check for config-dir parameter (bug1101129) * Silence routes internal debug logging * Imported Translations from Transifex * Delete Roles for User and Project LDAP * Why .pop()'ing urls first is important * don't create a new, copied list in get_project_users * Fixes 'not in' operator usage * Add --keystone-user/group to keystone-manage pki_setup * Adds png versions of all svg image files. 
Changes reference * Updates migration 008 to work on PostgreSQL * Create a default domain (bp default-domain) * Generate apache-style common access logs * import tools/flakes from oslo * tenant to project in the apis * Tenant to Project in Back ends * Fix bugs with set ldap password * Enable/disable domains (bug 1100145) * Readme: use 'doc' directory not 'docs' * rename tenant to project in sql * Update to requests>=1.0.0 for keystoneclient * Fix pep8 error * Document user group LDAP options * Sync latest cfg from oslo-incubator * Limit the size of HTTP requests * Fix role delete method in LDAP backend * public_endpoint & admin_endpoint configuration * Skip IPv6 tests if IPv6 is not supported * Allow running of sql against the live DB * Test that you can undo & re-apply all migrations * downgrade user and tenant normalized tables downgraded such that sqlite is supported, too * Auto-detect max SQL migration * Safer data migrations * Sync base identity Driver defs with SQL driver * Fix i18n of string templates * Enhance wsgi to listen on ipv6 address * add database string field length check * Autoload schema before creating FK's (bug 1098174) * Enable exception format checking in the tests * reorder tables for delete * Validated URLs in v2 endpoint creation API * Fixes import order nits * Cleanup keystoneclient testing requirements * Fix issue in test_forbidden_action_exposure * Correct spelling errors / typos in test names * Update ldap exceptions to pass correct kwargs * Add _FATAL_EXCEPTION_FORMAT_ERRORS global grizzly-2 --------- * Keystone server support for user groups * Add missing .po files to tarball * Imported Translations from Transifex * adds keyring to test-requires * Revert "shorten pep8 output" * Upgrade WebOb to 1.2.3 * il8n some strings * Imported Translations from Transifex * Removed unused variables * Removed unused imports * Add pyflakes to tox.ini * Fix spelling typo * shorten pep8 output * Driver registry * Adding a means to connect back to a 
pydevd debugger * add in pip requires for requests * Split endpoint records in SQL by interface * Fix typo s/interalurl/internalurl/ * module refactoring * Test for content-type appropriate 404 (bug 1089987) * Imported Translations from Transifex * fixing bug 1046862 * Expand default time delta (bug 1089988) * Add tests for contrib.s3.core * Test drivers return HTTP 501 Not Implemented * Support non-default role_id_attribute * Remove swift auth * Move token controller into keystone.token * Import pysqlite2 if sqlite3 is not available * Remove mentions of essex in docs (bug 1085247) * Ensure serviceCatalog is list when empty, not dict * Adding downgrade steps for migration scripts * Port to argparse based cfg * Only 'import *' from 'core' modules * use keystone test and change config during setUp * Bug 1075090 -- Fixing log messages in python source code to support internationalization * Added documentation for the external auth support * check the redirected path on the request, not the response * Validate password type (bug 1081861) * split identities module into logical parts remove unneeded imports from core * Ensure token expiration is maintained (bug 1079216) * normalize identity * Fixes typo in keystone setup doc * Imported Translations from Transifex * Stop using cfg's internal implementation details * syncing run_tests to match tox grizzly-1 --------- * Expose auth failure details in debug mode * Utilize policy.json by default (bug 1043758) * Wrap v3 API with RBAC (bug 1023943) * v3 Identity * v3 Catalog * v3 Policies * Import auth_token middleware from keystoneclient * Imported Translations from Transifex * Refix transient test failures * Make the controller addresses configurable * Expose authn/z failure info to API in debug mode * Refactor TokenController.authenticate() method * Fix error un fixtures * Ensures User is member of tenant in ec2 validation * Properly list tokens with a null tenant * Reduce total number of fixtures * Provide config file 
fields for enable users in LDAP backend (bug1067516) * populate table check * Run test_keystoneclient_sql in-memory * Make tox.ini run pep8 checks on bin * tweaking docs to fix link to wiki Keystone page * Various pep8 fixes for keystone * Use the right subprocess based on os monkeypatch * Fix transient test failures (bug 1077065, bug 1045962) * Rewrite initial migration * Fix default port for identity.internalURL * Improve feedback on test failure * fixes bug 1074172 * SQL upgrade test * Include 'extra' attributes twice (bug 1076120) * Return non-indexed attrs, not 'extra' (bug 1075376) * bug 1069945: generate certs for the tests in one place * monkeypatch cms Popen * HACKING compliance: consistent use of 'except' * auth_token hash pki key PKI tokens on hash in memcached when accessed by auth_token middelware * key all backends off of hash of pki token * don't import filter_user name, use it from the identity module * don't modify the passed in dict to from_dict * move hashing user password functions to common/utils * ignore .tox directory for pep8 in runtests * Imported Translations from Transifex * Implements REMOTE_USER authentication support * pin sqlalchemy to 0.7 * Move 'opentack.context' and 'openstack.params' definitions to keystone.common.wsgi * Removes duplicate flag for token_format * Raise exception if openssl stderr indicates one * Ignore keystone.openstack for PEP8 * Fixed typo in log message * Fixes 500 err on authentication for invalid body * Enable Deletion of Services with Endpoints * Exception.message deprecated in py26 (bug 1070890) * Utilize logging instead of print() * stop LdapIdentity.create_user from returning the user's password * Compare token expiry without seconds * Moved SQL backend tests into memory * Add trove classifiers for PyPI * Adding handling for get user/tenant by name * Fixed bug 1068851. 
Refreshed new crypto for the SSL tests * move filter_user function to keystone.identity.core * Fixes response for missing credentials in auth * making PKI default token type * Fixes Bug 1063852 * bug 1068674 * Update common * Extract hardcoded configuration in ldap backend (bug 1052111) * Fix Not Found error, when router not match * add --config-dir=DIR for keystone-all option * Add --config-dir=DIR in OPTIONS * Delete role does not delete role assignments in tenants (bug 1057436) * replacing PKI token detection from content length to content prefix. (bug 1060389) * Document PKI configuration and management * Raise if we see incorrect keyword args "condition" or "methods" * Filter users in LDAP backend (bug 1052925) * Use setup.py develop to insert code into venv * Raise 400 if credentials not provided (bug 1044032) * Fix catalog when services have no URL * Unparseable endpoint URL's should raise friendly error * Configurable actions on LDAP backend in users Active Directory (bug 1052929) * Unable to delete tenant if contains roles in LDAP backend (bug 1057407) * Replaced underscores with dashes * fixes bug 1058429 * Command line switch for standard threads * Remove run_test.py in favor of stock nose * utf-8 encode user keys in memcache (bug 1056373) * Convert database schemas to use utf8 character set * Return a meaningful Error when token_id is missing * Backslash continuation cleanup * notify calling process we are ready to serve * add Swift endpoint in sample data * Updated Fix for duplicated entries on LDAP backend for get_tenant_users * Fix wsgi config file access for HTTPD * Bump version to 2013.1 folsom-rc1 ---------- * Limit token revocation to tenant (bug 1050025) * Fixed trivally true tests (bug 983304) * add Quantum endpoint in sample data * Add XML namespace support for OSADM service api * Delete user tokens after role grant/revoke * LDAP backend attribute fixes * Document memcached host system time configuration * Implementation of tenant,user,role 
list functions for ldap * Initialize Metadata variable * Cleanup PEP8 errors from Common * List tokens for memcached backend * Implement token endpoint list (bug 1006777) * Ignore eclipse files * Identity API v3 Config, Routers, Controllers * Sync some misc changes from openstack-common * Sync latest cfg from openstack-common * Remove id_hash column * LOG.warn all exception.Unauthorized authentication failures * Fixed: test_default_tenant_uuid_token not running * Upgrade PEP8 to 1.3.3 (bug 1037303) * Expand PEP8 coverage to include docs & tests * Removed/fixed unused variable references * HACKING compliance & staticly init module vars * PEP8 fix E251 * PEP8 fix * Removed unused imports * Check for expected cfg impl (bug 1043479) * Fixed typos in comment * HACKING: Import by full module path * HACKING: Use single quotes * mistake in doc string * pep8 1.3.3 cleanup removing unused imports * Removed dead code * Fix auth_token middleware to fetch revocation list as admin * Require authz to update user's tenant (bug 1040626) * Code cleanup in doc/source/conf.py * Typo fix in keystone: existant => existent * allow middleware configuration from app config * PEP8 fix for PAM test * change verbose and debug to Fasle in keystone.conf.sample * add token_format=UUID to keystone.conf.sample * Demonstrate that authenticate() returns roles * Add nosehtmloutput as a test dependency * Less information returned with IntegrityError * Support running the tests in the debugger * Removed stray print statement (bug 1038131) * Remove unused variables * PKI Token revocation * Remove unused imports * Adding missing files to MANIFEST.in * Simplify the sql backend deletion of users and tenants * Add tests for PAM authentication * Allow overloading of username and tenant name in the config files * Enabling SQL Catalog tests (bug 958950) * Use user home dir as default for cache * Set example key_size to 1024 * Log errors when signing/verifying * Implement python version of migration 002 * Set 
default signing_dir based on os USER * Assert adminness on token validation (bug 1030968) * Test for Cert by name * Typo error in keystone/doc/source/configuration.rst * fix broken link * Cryptographically Signed tokens * Sync jsonutils from openstack-common * Added user name validation. Fixes bug 966251 * Import ec2 credentials from old keystone db * Debug output may include passwords (bug 1004114) * Raise unauthorized if tenant disabled (bug 988920) * Files for Apache-HTTPD * Implementation of LDAP functions * Fix the wrong infomation in keystone-manage.rst * Webob needs body to calc Content-Length (bug 1016171) * Prevent service catalog injection in auth_token * Admin Auth URI prefix * updating testing documentation * adding keystoneclient test * Removed redundant / excessively verbose debug * Making docs pretty! * Adding user password setting api call * Fixing pep8 errors in tests/*py * Make sure user dict has id key before checking against it * pep8 for openssl * Run pep8 for tests * Move monkey patch to keystone-all startup * Use sdist tarball instead of zipball * Return a 409 error when adding a second time a role to user/tenant * notify calling process we are ready to serve folsom-2 -------- * Set iso8601 module as default dependence * Fixed user-only role deletion error * Use PyPI for keystoneclient * keystone_manage certificate generation * documenting models * Reorder test imports by full import path * pep8 v1.3.3 compliance (bug 1019498) * Correct Tree DN * don't assume that the LDAP server require authentication * fix variable names to coincide with the ones in common.ldap * Keystone should use openstack.common.timeutils * Fixed marker & limit computation (bug 1006055) * Do not crash when trying to remove a user role (without a tenant) * Keystone should use openstack.common.jsonutils * Refactor 404's into managers & drivers (bug 968519) * fix sphinx warnings * fix man page build * Utilize newer changes in openstack-common * Add .mailmap file * setting 
up babel for i18n work blueprint start-keystone-i18n * Removed unused import * Fix order of returned tuple elements in pam authenticate * Reorder imports by full module path * Pass serviceCatalog in auth_token middleware * Fixed typo in routing conditions (bug 1006793) * 400 on unrecognized content type (bug 1012282) * Basic request stats monitoring & reporting * Monkey patching 'thread' * Speed up SQL unit tests * PEP8 fixes * Clean up test requires a bit * Use cfg's new global CONF object * Add s3 extension in keystone.conf sample * Tweak for easier, safer subclassing * Revert file mode to be non-executable * fix importing of optional modules in auth_token * Carrying over token expiry time when token chaining * Keystone should use openstack.common.importutils * Require authz for user role list (bug 1006815) * Require authz for service CRUD (bug 1006822) * PEP8 fixes * Use cfg's new behavior of reset() clearing overrides * Use cfg's new group autocreation feature * Sync with latest version of openstack.common.cfg * blueprint 2-way-ssl * Fixes some pep8 warning/errors * Update swift_auth documentation * Add ACL check using : format * Use X_USER_NAME and X_ROLES headers folsom-1 -------- * Allow other middleware overriding authentication * Backslash continuation removal (Keystone folsom-1) * Remove service_* from authtoken examples * Nail prettytable test dependency at 0.5.0 * Invalidate user tokens when a user is disabled * Fix depricated /users/{user-id}/roles * Changed arguments in keystone CLI for consistency * Add validations of 'name' field for roles, users and tenants * Added 'NormalizingFilter' middleware * One 'ctrl-c' kills keystone * Make sure we parse delay_auth_decision as boolean * Flush tenant membership deletion before user * notify calling process we are ready to serve * Invalidate user tokens when password is changed * Added tenant name validation. 
Fixes bug 966249 * Corrects url conversion in export_legacy_catalog * Truly handle mailmap entries for all combinations * fix pam admin user case * Improve the sample keystone.conf * Add defaults for ldap options * Sync to newer openstack-common * Set defaults for sql options * Set defaults for port options * Add defaults for driver options * Use ConfigOpts.find_file() to locate catalog template * Use ConfigOpts.find_file() to locate policy.json * Policy doc updates; RST syntax consistency * Removed SimpleMatch 'shim'; updated readme * Removed old sections; improved syntax consistency * cleanup dependent data upon user/tenant deletion * Update tests to run servers on 127.0.0.1 * Switch to 1000 rounds during unit tests * Fix argument name referred in the document * Exit on error in a S3 way * Auto generate AUTHORS file for keystone component * Misnamed exception attribute (bug 991936) * Avoid ValueError in 12.04 essex pkg (bug 988523) * Non-nullable User, Tenant, Role names (bug 987121) * Fix expired token tests * Make run_tests.py non-executable * Add distribute to test-requires * Makes the ldap backend return proper role metadata * cleanup no_meta user in live LDAP test * Add ChangeLog to tarball * Fix "it's" grammar errors * Rename keystone.conf to .sample * Import latest openstack-common * Stub out swift log configuration during testing * Remove tenant membership during user deletion * Add a _ at the end of reseller_prefix default * additional logging to support debugging auth issue * Add support to swift_auth for tokenless authz * Make import_nova_auth only create roles which don't already exist * don't duplicate the extra dict in extra * Fix looking for config files * endpoint-crud 404 (bug 963056) * user-role-crud 404 (bug 963056) * ec2-credential-crud 404 (bug 963056) * service-crud 404 (bug 963056) * user-crud 404 (bug 963056) * tenant-crud 404 (bug 963056) * Add build artifacts missing from .gitignore * Switch keystone.test.TestCase to use unittest2 * 
Raise keystone.exception for HTTP 401 (bug 962563) * Fixed misc errors in configuration.rst * Docs: SQL-based vs File-based Service Catalog * Improve service CRUD test coverage * Change default catalog driver to SQL; doc the options * Replace tabs with spaces * role-crud 404 (bug 963056) * Improve swift_auth test coverage + Minor fixes * Open Folsom essex-rc1 --------- * S3 tokens cleanups * Check values for EC2 * Fix critical typo in endpoint_create (bug 961412) * updating docs to include creating service accts * unique role name constraint * Add test for swift middleware * Spring cleaning, fix PEP8 violations * Rename tokenauth to authtoken * pass the arguments in when starting keystone-all * fix keystone-all's usage of options vs conf * Wrapped unexpected exceptions (bug 955411) * Changing belongsTo validation back to ID * Clean up sql connection args * Improved file logging example (bug 959610) * Swift middleware doc update * Fixes LP #954089 - Service list templated catalog * Remove nova-specific middlewares * Add check for MAX_PASSWORD_LENGTH to utils * Remove glance_auth_token middleware * Support PyPAM in pam backend, update to latest API * Fix default port for identity.internalURL * Installing keystone docs * Update username -> name in token response * Refactor keystone.common.logging use (bug 948224) * Add automatically generated code docs * Properly return 501 for unsupported Catalog calls * docstring cleanup to remove sphinx warnings * updating documentation for rewrite of auth_token * Allow connect to another tenant * Update docs for keystone client cli args * Raising unauthorized instead of 500 (bug 954547) * Failing to update tenants (bug 953678, bug 954673) * added LDAP section to architecture and architecture * Bug #943031 MySQL Server has gone away added docnotes of error messages caught for mysql and reference * making all use of time follow datetime.utcnow() fixes bug 954057 * Improved legacy tenancy resolution (bug 951933) * sample_data.sh: 
check file paths for packaged installations * Fix iso8601 import/use and date comparaison * Fix double-quoted service names * Remove Nova Diablo reference from migrate docs * Fixes the cli documentation of user/tenant/roles * Add simple set of tests for auth_token middleware * update documention on changing user password * enables run_test option to skip integration * Add token caching via memcache * Update get_metadata to return {} * Diablo to Essex migration docs (bug 934328) * Added license header (bug 929663) * Add AUTHORS to the tarball * create service endpoints in sample data * Fix EC2 credentials crud after policy backend change * port common policy code to keystone * rename belongs_to to belongsTo as per the API spec * Make sure we have a port number before int it * fixes lp#949648 change belongsTo validate to name * HTTP_AUTHORIZATION was used in proxy mode * fix Nova Volume Service in sample data * fixes bug lp#948439 belongs_to and serviceCatalog behavior * removing belongs_to as a kwarg and getting from the context * adding a serviceCatalog for belongs_to calls to tokens * adding test to validate belongs_to behavior in tokens * Make bind host configurable * add more default catalog templates * Fix coverage jobs for Jenkins * Improve auth_str_equal() * Set default identity driver to sql (bug 934332) * Renamed sqlite files (bug 944951) * Isolating backtraces to DEBUG (bug 947060) * updating readme to point to developer setup docs * fixes bug 945274 * Add reseller admin capability * Remove trailing whitespaces in regular file * LDAP get_user_by_name * Added missing import (bug 944905) * add git commit date / sha1 to sphinx html docs * gitignore follow up for docs/ rename * improve auth_token middleware * Add service accounts to sample_data.sh * standardize ldap and related tests * Align with project configs * Fixes doc typo s/SERVIVE/SERVICE/ * Use constant time string comparisons for auth essex-4 ------- * Unpythonic code in redux in auth_token.py * fix 
pep8 * GET /v2.0 (bug 930321) * LDAP member defaults * Handle KeyError in _get_admin_auth_token * Align tox jobs with project standards * renaming pip-requires-test to test-requires * Provide request to Middleware.process_response() * Add Vary header (bug 928057) * Implement a Catalog SQL backend * Set tenantName to 'admin' in get_admin_auth_token * LDAP Identity backend * Implements extension discovery (bug 928054) * Support unicode in the keystone database * Add HEAD /tokens/{token_id} (bug 933587) * XML de/serialization (bug 928058) * fleshing out architecture docs * Update auth_token middleware so it sets X_USER_ID * Adds AUTHORS file generated from git log (and de-duplicated) * The default nova compute port is 8774 * Fix case of admin role in middleware * Fix MANIFEST.in to include missing files * Remove extraneous _validate_claims() arg * Create tools/sample_data.sh * Backslash continuations (Keystone) * Correct config name for max_pool_size * Use cfg's new print_help() method * Move cfg to keystone.openstack.common * Remove cfg dict mixin * Update cfg from openstack-common * Fix copyright dates and remove duplicate Apache licenses * some additional style bits * Add migration path for Nova auth * fix the style guide to match the code * Re-adds admin_pass/user to auth_tok middleware * Fix thinko in keystone-all sys.path hack * Removing broken & redundant code (bug 933555) * Return HTTP 401 bad user/password is specified * cli now returns an exit status cmd is invalid * Ignore sqlite.db files * Implements admin logic for tenant_list call * Implemented get_tenant_users. 
Fixed bug 933721 * Removing unused imports from keystone.cli * Set include_package_data=True in setup.py * Remove data_files section from setup.py * Update Manifest.in * Add migrate.cfg to data_files in setup.py * Should return 300 Multiple Choice (bug 925548) * Admin version pipeline not utilized (bug 925548) * fixes #934459 * Fix logging.config import * backport some asserts * remove pycli * Adds missing argument to add_user_to_tenant in create_user * Fixes a failure caused by a recent change to user update in the client * remove executable bit from setup.py * Raising 'NotImplmented' results in TypeError * Update docs for Swift and S3 middlewares * Added Apache 2.0 License information * Add docs on keystone_old -> ksl migration * Add token expiration * Update docs to for current keystone-manage usage * add catalog export * Handle unicode keys in memcache token backend * make sure passwords work after migration * add legacy diablo import tests * change password hash * add essex test as well * add sql for import legacy tests * add import legacy cli command * add migration from legacy db * remove keystoneclient-based manage commands * Remove executable bit from auth_token.py * Update swift token middleware * Add s3_token * Add pagination to GET /tokens * Fixes role checking for admin check * Fix webob exceptions in test_middlware * Add tests for core middleware * Add version description to root path * Add TokenNotFound exception * remove diablo tests, they aren't doing much * Fix largest memory leak in ksl tests * Add memcache token backend * Friendly JSON exceptions (bug 928061, bug 928062) * Fix comment on bcrypt and avoid hard-coding 29 as the salt length * Add SQL token backend * Add content-type to responses * Cope with unicode passwords or None * Add auth checks to ec2 credential crud operations * termie all the things * example in hacking was incorrect * Ensures duplicate users and tenants can't be made * make pip requires match nova * fixes lp:925721 adds 
.gitreview for redux branch * remove novaclient, fix python syntax * We don't need all the deps to check pep8 * remove extra line * Make ec2 auth actually work * fixing grammar, noting broken enable, adding hacking with prefs for project * Removed unused reference * adding a token service Driver to define the interface * Added support for DELETE /tokens/{token_id} * Fixes bug 924391 * ran through all commands to verify keywords against current (master) keystonelight * updating docs: * Fix "KeyError: 'service-header-mappings'" * updating tox.ini with test pip requirements * use our own logging module * Update auth_token middleware to support creds * Removes nova middleware and config from keystone * minor docstring update for new locations * Missed one more keystone-server * Renamed keystone-server to keystone-all based on comments in LP: #910484 * be more safe with getting json aprams * skip the two tests where testing code is failing * accept POST or PUT for tenant update * deal with reparsing the config files * don't automatically parse sys.argv for cfg * deal with tags in git checkout * fix keystoneclient tests * add tests for essex and fix the testing framework * Update docs/source/developing.rst * Change the name of keystone to keystone-server so the binaries dont conflict with python-keystoneclient * Normalize build files with current jenkins * Use gerrit instead of github * Fix pep8 violations * Add .gitreview file * Added keystone-manage list_role_grants (bug 923933) * removing unused images, cleaning up RST in docstrings from sphinx warnings * pep8 cleanup * shifting contents from _static to static * adding in testing details * moved notes from README.rst into docs/architecture.rst * updating formating for configuration page * format tweaks and moving old docs * shifting older docs into old/ directory * doc updates * moving in all the original docs from keystone * adding python keystoneclient to setup.py deps * fixing up PIP requirements for testing and 
virtualenv * indents * Make it as a subclass * Added shortcut for id=NULL queries (bug 916386) * fix style and termie's comments about comments * invalid params for roles.delete * initial stab at requiring adminness * Simplify code * add tests that auth with tenant user isn't member of * Add s3tokens validation * Test coverage for issue described in bug 919335 * Removing __init__ from non-packages (bug 921054) * add instructions for setting up a devenv on openSUSE 11.4 and 12.1 * Documented race condition (bug 921634) * Fix race in TestCreateTokenCommand (bug 921634) * Forgot to update models (bug 885426) * Updating example glance paste config * add a bunch of basic tests for the cli * Migrated 'enabled' int columns to bool for postgres (bug 885426) * remove this useless catalog * move cli code into a module for testing * Updated bp keystone-configuration for bp keystone-manage2 * Return Version and Tenant in Endpoints * Updated error message for keystone-manage2 * allow class names to be different from attr names * add ec2 credentials to the cli * fix middleware * Added: "UserWithPassword" Added: "UserWithOnlyEnabled" Removed: "UserWithOnlyPassword" * Update Extended Credentials (EC2, S3) * Fix for bug 921126 * Adds keystone auth-n/auth-z for Swift S3 API * Implement cfg.py * bcrypt the passwords * fix token vs auth_token * Implement Secure Token Auth * some quick fixes to cli, tests incoming * fix pep8 * fix some more pass-by-reference bugs * strip password before checking output * flip actual and expected to match common api * don't allow disabled users to authenticate * turn off echo * fix invalid_password, skip ec2 tests * Suppressed backtraces in tests causes sweaty eyes * strip password from sql backend * raise and catch correct authenticate error * rely on internal _get_user for update calls * Fixed: Inserting URLs into endpoint version attr * strip password from kvs backend * fix user_get/user_list tests * Release Notes for E3 * Addresses bug 918608 * 
Restore Console Info Logging - bp keystone-logging * removing the sphinx_build from setup.py, adding how to run the docs into the README * Added Vary header to support caching (bug 913895) * Implemented subparsers (bp keystone-manage2) * Handle EC2 Credentials on /tokens * ec2 docs * simple docstrings for ec2 crud * Fixed PEP8 violations and disallowed them * Implemented bp keystone-manage2 * Fixes 918535: time not properly parsed in auth_token middleware * Use dateutil 1.5 * get docs working * some cli improvements * add checks for no password attribute * Prestage fix - fixed requirement name; python-dateutil, not dateutil * users with correct credentials but disabled are forbidden not unauthorized * Pre-staging pip requires * shimming in basics from original keystone * test login fails with invalid password or disabled user * doctry * use token_client in token tests * remove duplicate pycli from pip-requires * fix ec2 sql config * get_client lets you send user and tenant * update how user is specified in tests * rename ec2 tests to be more explicit * use the sql backend for ec2 tests * more failing ec2 tests * add METADATA for boo * add (failing) tests for scoping ec2 crud * add some docs that got overwritten last night * Bug #916199: keystone-manage service list fails with AttributeError on Service.description * Exception raise error * Updates to middleware to deprecate X_USER * Revert "Exception raise error" * fix pep8 * update tests * update some names * fix some imports * split up sql backends too * split up the services and kvs backends * establish basic structure * add docs for various service managers * expect sphinx sources to be autogenned * some tiny docs * fix sphinx * testing rst on github * updating dependencies for ksl * needed to do more for cli opts * make a main in keystone-manage * fix pep8 error * rename apidoc to autodoc * Fix typo * Fix LDAP Schema Syntax (bug 904380) * return to starting directory after git work * spacing * tests for ec2 
crud * add keystoneclient expected format * add sql backend, too * add an ec2 extension * update readme * Exception raise error * re-indent * re-indent * re-indent * re-indent kvs.py * re-indent test.py * remove models.py * add some docs to manager * dynamic manager classes for now * add a couple more tests * Bug #915544: keystone-manage version 1 commands broken when using flags * add some more todos * strip newlines * TODO * add role refs to validate token * fix token auth * check for membership * flush that sht * add more middleware * fixing WatchedFileHandler * logging to debugging by default for now * add a noop controller * woops * add glance middleware ?? * add legacy middleware * fix setup.py * adding #vim to file with changed indent * add id-only flag to return IDs * rename ks to keystone-manage * fixing imports for syslog handlers and gettext * adding gettext * adding logging from configuration files, default logging per common * cli using keystoneclient * add a db_sync command to bin/ks, remove others * merge test and default configs * adding project to keystone config to find default config files * some more config in bin/keystone * in the bin config too * rename many service parts to public * keystone_compat -> service * remove keystone from names, remove service * remove default configuration * basic service running again * rename extras to metadata * version number in setup.py * add basic sphinx doc bits * remove references to keystone light * renaming keystonelight to keystone * keystoneclient tests working against sql backend * run all teh keystoneclient tests against sql too * move everything over to the default config * config system overhaul * add nova's cfg framework * fix pep8 * missed a file * most tests working again * still wip, got migration mostly working * get the sql ball rolling, still wip * add sql backend, WIP * Show useful traceback if manage command fails * Fix minor typo * Add 'tenants' to Auth & Validate Response * Fixed Test 
Coverage Handling * Adding prettytable dependency * Front-end logging * tweaking for running regular tests in jenkins * Implement Role Model * xsd fixes * Added decorators for admin and service_admin checks * Initial keystone-manage rewrite (bp keystone-manage2) * Correct endpoint template URLs in docs * fix bug lp:843064 * finished up services stuff * add the various role tests * add list users * get user tests working * Remove install_requires processing * get endpoints test working * get tenant_add_and_remove_user test working * tenant test working again * copy over the os-ksadm extension * Implement Endpoint, Endpoint Template, and Credential Managers * PEP8 keystone cleanup * Changes run_tests.sh to also run pep8 by default * example crud extension for create_tenant * Updates to Tests/Testing * Un-pythonic methods lp:911311 Fixed pep8 problems Changed comments to docstrings * get some tests working again * merge fixes * fixup * Made tests use both service and admin endpoints * All tests but create_tenant pass * Split keystone compat by admin and service endpoints * Install a good version of pip in the venv * fix bug lp:910491 option "service_host" in keystone.conf not works * Added broken tests to show compatibility gaps * Added tox.ini file * Split keystone compat by admin and service endpoints * Implement Service Manager * Implement Tenant Manager * Fixes bug lp:910169 - Tests are using too much memory Added super() call to tearDown() method * Changed the call to create the KeystoneContextMiddleware object to pass the correct glance ConfigOpts object * Added logging on core modules * Adding logging to Auth-Token Middleware * Implement Role Manager * Refactor models and backends * Add HP-IDM extension to fix Bug 890411 * Move URL Normalizer to Frontends * move novaclient tests over also * clean up test_identity_api * clean up keystoneclient setup * Move Global Role variables out of backendutils * Bug #909255: Endpoint handling broken on SQL backend by 
portable-identifiers changes * add role crud * speed up tests * add basic fixture functionality * documentation driven development * novaclient now requires prettytable * Return Endpoint IDs * Correct Handling of Default Tenant * Fix duplicate logging * Added global endpoints response in XML as well * Fix: Client and Unit Tests not correctly failing a build * Bug #907521. Changes to support get roles by service * Always Return Global Endpoints * Added release notes * Fixed error with database initialization * Tests use free TCP/IP ports * Testing Refactor - this is a squash of 6 commits - original commits are vailable for cherry-picking here: https://github.com/ziadsawalha/keystone/commits/tests * Added HP-IDM documentation artifacts * whitespace * whitespace * make create_tenant work for keystone api * common ks client creation * Fixed version response (bug 891555 and bug 843052) * Implement Multiple Choices Response (bug 843051) * updating of docs * Fix LDAP schema (bug 904815) * working on a tenant_create test * standardize spacing * novaclient uses password instead of apikey * update to use the correct repo for python-novaclient * fix tenant auth tests * Updated namespace * Fixes the catalog return in d5_compat calls * Added: ./keystone-manage database goto * Added databased version check on startup w/ docs * Revised in-memory sql connection path for sqlalchemy * Clarify 'test not found' error message * Contract fix: change IDs from xsd:ID to xsd:string * Tenants - asserted all the things (bug 887844) * Support for unscoped admin tokens * LDAP: fix to keystone.ldif * Contract fix: IDs are not Ints, they are ID or string types * Contract fix: description optional * Update tracer excludes for Linux * Fixed bug 905422. Swift caching should work again. 
Also fixed a few other minor syntactical stuff * Update test_keystone_manage to use unittest2 * Python 2.6 subprocess.check_output doesn't exist * No more python path changes * Clarified language on migration instructions * Refactor: Workaround for python build_sphinx failure * Fixed some skipped tests * Format keystone-manage output better * Added instructions to git clone from github * Refactor: Computing api/model module paths dynamically * Introduces UID's & domain models (bp portable-identifiers) * Improved test coverage of d5 compat * Fixed: Tests returning successful (0) on failure * D5 Compatibility Support * Added original tenants blueprint to docs * Fixed broken import of version info (bug 902316) * Added missing import preventing keystone from starting (bug 901453) * Fix some issues with new version module * quantum_auth_token.py middleware fails on roles * Removed Server class from __init__.py * Fix auth_token middleware: make _verify_claims not static. Fixes bug #901049 * Pylint fixes to auth_token.py * Split version code into its own file * Change is_global == 1 to is_global == True * Bug 897496: Remove tenant id from Glance URLs * Refactor: move initialization code to class * Add missing json validation * Refactor: get rid of keystone/config.py * Fixes missed tests and subsequently introduced bugs * Rename .keystone-venv to .venv * Refactor: Rename auth controller to token controller * Added documentation * Added SSL and memcache sample config files * Updated auth_token middleware caching to support memcache * Deprecating RAX-KEY middleware * Added argparse to support python 2.3 - 2.6 * Make bin/keystone use port settings in the config file. Fixes bug #898935 * Bug#899116: use correct module when building docs * Minor RST changes * Revised extension documentation * Added documentation for SQL tables * Remove pysqlite deps. 
Fixes bug #898343 * Pretty-printed JSON samples * Added option to pretty-print JSON * Implements blueprint keystone-swift-acls * Updated docstring to match auth_token.py (bug 898211) * Bug #890801 Changes to support /extensions call. - Introduced a new extension reader to read static extension content. - Added additional rst files explaining extensions. - Removed functionality from additional middleware that used to support /extensions call.ie RAX-KEY-extension - Removed service extension test as it was no more relavent. - Added unit test that checks toggling of extensions. - Additional notes on the conf file * Added JSON validator; fixed samples (bug 898353) * Fixes a number of configuration/startup bugs * Fixed RST syntax (bug 898211) * Revised schema migration docs * Improved doc formatting consistency (bug 898211) * Fixed RST syntax in doc strings (bug 898211) * Added ssl docs to index; fixed rst syntax (bug 898211) * Bug-897724: Added method to list endpoints specific to a service and related tests * Eliminated debug output from sphinx_build (bug 898211) * Updated testing * Fixes bug lp:897819 * Check that endpointTemplate ID is valid in endpoint add cmd (#897749) * Added Endpoint and Endpoint Template documentation * Bug #854104 - Changes to allow admin url to be shown only for admin users. 
- Additional test asserts to verify * Fixed memcache tests * Update documentation and examples following API 1.1 removal * Fixes bug 843065 * Additional middleware test coverage * Enforce service ownership * Add keystone_tenant_user_admin option and fixes * Make owner the user named same as tenant/account * Restored developer default log dir * Add default for log directory and log filenames * Added wadls, pdfs, samples and functional test confs (bug 891093) * Additional documentation * ./keystone-manage endpointTemplates list missing arg (bug 891843) * Bug #890399 * Bug #891451: Changes to support update endpointTemplates call in the WADL * add an example for capability rbac * make readme use code style * add the policy code * describe and add a policy backend * policty stub * re-indent * Added timeout to bufferedhttp class and timeout setting for middleware - bug 891687 * Refactoring master to match stable/diablo fix for bug 891710 * Refactor auth_token.py to only call out to Keystone once * Added files missing from dist packaging (bug 891093) * pylintrc should not be hidden (bug 891093) * Simplified gitignore (in pursuit of bug 891093) * Fixes typo in setup document * Adding middleware tests * Remove executable bit on template * change array syntax * updates to make compatible with middleware * mergeish dolph's port change * fix tests * handle unscoped requests * adjust default port * Revised version status response (bug 890807) * Refactored headers produced by middleware (bug 835087) * move noop to identity controller * Ignoring db migrate mgmt module to workaround bug 889287 * 'text/json' should be 'application/json' (bug 843226) * Revised curl examples (bug 884789) * allow setting user_id on create * users require a name * pep8 * update test conf too * cli for adding users, tenants, extras * adjust paths and use composite apps * add tests for extras * add tenant crud * oops, forgot update in crud * add crud tests * add crud tests * add crud tests * add test 
for create user and get user * add test for create user and get user * re-indent identity.py * don't pep8 swp files * accept data as kwargs for crud * use the keystone app in the conf * reorg * re-indent service.py * Bug 888448: - Changes to allow validate token call return user name as per contract. - Additional test assertions to test the same. - Changes to middleware * more dyanmic client * get some initial identity api tests working * update service to middleware in confs * move around middleware * make a composite app * add crud methods to identity manager * Add a new swift auth middleware * Use TENANT_ID if it exists, but still support X_TENANT * cli beginnings * Bug 888170: Fixing references to incorrect schema * add admin port * add an etc dir * Bug #888210: Changes to fix calls to use the right path * bug 878431: Minor changes to auth_token middleware * add a default handler for / * Bug #886046 Add Quantum auth middleware to Keystone source code tree * add a stubby setup.py * use paste for the binary * add a trivial admin-only middleware * update keystone sample tests, skip one * Bug #887236: - Changes to allow extensions to be configured. - Introduced a new property that holds list of extensions that are to be enabled * add crud info to readme * get novaclient tests working * add novaclient, intermediate * add run_tests.sh and pep8 stuff * remove italics on Light * modify requirements * link diagrams * Track post-Diablo database evolution using migrations (BP: database-migrations) * Changed blatant hack (fixed spelling also) to 5 second timout as tests were not completing * Use TENANT_ID instead of TENANT for project_id * X.509 client authentication with Keystone. 
Implements blueprint 2-way-ssl * whitespace * added catalog tests * added tests for tokens * test the other methods too * add some tests and get others to pass * add some failing tests * add a default conf * minor whitespace cleanup * add some todo * fixed the output message error on granting user a role * Bug #884930 Support/Remove additional calls for for Tenant. - Supported call to get users for a tenant for a specific role. - Removed calls to get specific role for a user and to get all the roles for a specific tenant as they are not useful. - Fixed LDAP backend call to get users for a tenant. - Disabling Invalid pylint check * adding docs to test classes, updating run_tests.sh to match reality adding debug middleware factory adding docs on enabling debug middleware resolving pep8 issues * Fixes LP Bug#885434 - Documentation showing multiple tenants misleading * add example * rst blah blah * updated readme * authenticate and tenants working * working authenticate in keystoneclient * remove test_keystone_compat's catalog tests * add templated catalog backend * Use pure version number ("2012.1") in tarball name * Set run_tests.sh so pep8 runs in the virtualenv * bug 885364 * bug:884518 Changes to support passwordcredentials calls as per API contract. 
Minor LDAP code change to support tests * Fixed spelling of 'Resources' (Resoruces) * pep8 cleanup * everything but the catalog * Remove execute bit on keystone.conf * Fixes LP882760.Changes to return TenantId properly as part of roles.Additional tests to support the same * Moving contributor docs into rst (bug #843056) * fixing search sequence to not include directory structure from os.walk() * bug lp:882371 Standardize Json pagination structures * get a checkout of keystoneclient * bug lp:882233 Code changes to support API calls to fetch services/roles by name * Removed contributor doc build info from project README (bug #843056) * Revised documentation build process (bug #843056) * updates to keystone documentation - install & conf bug 843056 blueprint keystone-documentation * Specific LDAP version causing hiccups installing on latest ubuntu & fedora * Adding the concept of creating a Keystone HTTP client in Python which can be used in Keystone and imported from Keystone to allow for easier Keystone integration * Add .gitreview config file for gerrit * updating keystone developer documentation updating docstrings to remove errors in automodule generation updating setup.py to generate source documentation blueprint keystone-documentation bug 843056 * Changes to support getuser by name and gettenant by name calls * Changes to support get endpoints for token call * Additional changes to support endpointtemplates operations.Disabling pylint msgs that dont fit * Github markdown doens't seem to like irc:// links * Removed 'under construction' docs provided elsewhere * Updated self-documentation to point to docs.openstack.org * Revised documentation * Changes to endpoint operations as per OSKSCATALOG contract. 
Adding couple of pylint fixes * Refactored version attributes * Changes to support endpointTemplate operations as per new API.Fixed issues with command line manage stuff * Updated Secret Q&A to extend CredentialType * Changes to support API calls as per OS-KSCATALOG extension * Improved CLI error feedback (bug 877504) * authenticate working, too * base tests on keystone-diablo/stable * get tenants passing, yay * flow working, added debugging * add context to calls * move diagram into docs dir * refactor keystone compat and add catalog service * added sequence diagrams for keystone compat * Resubmitting change. Fixing issue #843226. Changes to throw appropriate faults during token validation * bug lp:865448 change abspath to dirname in controllers/version.py to correct path problems * Moving non core users and tenants calls to appropriate extensions * Fix issues in the ec2 middleware * Adding calls to get roles for user as per new format.Cleaning references to old code * Fixes LP844959, typo in Authors file * Changes to support roles and services calls via extensions. Change-Id: I1316633b30c2be07353dacdffb321791a4e2e231 * Simplified README * First commit for Secret Question and Answer Extension: RAX-KSQA * Fixing issue 854425.ie chaning token table name to tokens. Fixing issue 863667.Changes to support updation of user/tenant name as well using api calls. Fixing LDAP backend to have id independent of name.Fixing getuser call to also return name * Fixing bug 859937. 
Removing incorrect atom feed references from roles.xsd * Minor corrections to the middleware and wadl * Changes to show name also for the user list * Changes to show admin URL also as a part of json in endpoints listing * getting closer, need to match api now * tests running through, still failing * add a test client * added a test, need to get it working now * Use the tenant name for X_TENANT * Fix possible_topdir computing * Change roleId to role.id for swift middleware * adding in doc and setup to cover existing scripts adding doc around credentials command usage (for EC2) 2011.3 ------ * Updating legacy auth translation to 2.0 (bug #863661 * Shouldn't look in /etc/init/ for config files * Changing default admin port from 5001 to 35357, per IANA/IETF (bug #843054) * Organizing and documenting pypi requirements * sample data updates to remove -service from image and identity * Refactor and unit test json auth parsing * Error message expecting 'e' in local scope * Do not return identical error messages twice * Update auth examples in README * README.md changes to point to openstack repo * updating docs for Mac source install, no docs for mac package install relevant * POST /tokens: Added tenant id & name to scoped tokens in XML (#862752) * Updated guides.Have recompiled to use the latest examples * Fix bug 861546 * Fix swift middleware with regard to latest changes * Changes to support getTenants to behave differntly for admin users when invoked as a service api or admin api * Changes to stored hashed password in backends. Using passlib a password hashing library. Using sha512. 
Setting hashing to be the default behavior * Changes to WADLs to refer actual types * Revised docstring * Added /etc/init/keystone.conf to list of known configuration paths * Revising tenant IDs & Names in samples (#854228) * Authenticating against non-existent tenant (fixed #859927) * Adds list of dependencies to dev install * Fixed Anne's email address & list position (alphabetical) * Added support for scoping by tenantName * Changes to return groups as a part of RAXKSGRP extension.Also fixed incorrect schema version references in wadls and examples * Changes to support authenticate call to accept token as per agreed format * Minor changes to wadl * Making type mandatory as per sandy's request and minor fixes to wadl examples. Adding Ann as an author * Changes to structures to support authenticate using token. Minor wadl fixes. Adding Anne as an author * Removing token element from token.xsd * Update to token.xsd to allow element token as a root element in relation tu bug: https://bugs.launchpad.net/keystone/+bug/855216 - apiKeyCredentials Samples casing apiKey update * Changes to support endpoint template addition/listing by service names. 
Changes to list service details as well * Modified apiKeyCredentials to extend single entity and use restriction * Reorder params in User() constructor * Fix for bug 856857 - add user.name to User() constructor to re-align param * Fix for bug 856846 - cast ints to string in users_get_by_tenant_get_page so that they can be joined * POST /tokens: A chronicle of missing features * Fixes issues with ldap tests * Get Service Catalog from token * Fixes auth_token middleware to allow admin users in nova * Initial set of changes to move role operations to extensions * Updating guide wrt wadl changes * Minor Changes to extension WADL * Changes to support auth catalog as per new format * Changes to docs * Adding tenantid to user roles and endpoints * Fixes bug 855823 * Add code removed in https://code.launchpad.net/~vishvananda/nova/remove-keystone-middleware/+merge/76297 to keystone * Added support for HEAD /tokens/{token_id} Changed POST /tokens response container from 'auth' to 'access' * Making identity-admin.wadl well-formed * Converting to new doc format for included code samples * Changing authenticate request content xml as well as json * GET /tokens/{token_id}: Exposing both role ID's and Name's * Renaming 'roleRef' container to 'role' * Renaming 'roleRefs' container to 'roles' * Renaming GET /tokens/{token_id} response container to 'access' * Revised samples * Fixed path issues with keystone-import * Update validate_service_or_keystone_admin_token so that it doesn't cause exceptions if the admin or service admin haven't been configured * Changing/introducing actual extension json/xml snippets. Adding updated documents * Backend-managed role & service ID's (bug #834683) * Initial Changes to move service operations to extensions * Docs,wadls,samples,initial code to support RAX-KSKEY and OS-KSEC2 extensions. 
Removed tenant id from being part of endpoints * Glance Auth Token Middleware fix * Sorted AUTHORS list * adding imports from Nova for roles, tenants, users and credentials * Update keystone-manage commands to convert tenant name to id. Fixes #lp849007 * 1.Changed all Json paginated collection structure. 2.Introduced a type for credential type (path param) and change wadls and xsds. 3.Added List Users call. 4.Changed Endpoint creation example * Don't import keystone.test unless we are in testing. Fixes #lp848267 * Add toggle to run tests in-process, w/ realtime progress feedback * Add ability to run fakeldap in memory * Added backend-managed primary key to User and Tenant model * Introducing doc to support OS-KSCATALOG extensions.Adding new calls to OS-KSADM extension document * Adding initial document for OS-KSADM-admin extension.Related changes on wadl,json,xsd etc * Fixing sample content * Adding new doc.Changes to sample xmls and jsons * Validation content and relavant changes * Minor fixes on xsds and sample xmls * Fixing existing wadl.Completing wadl for extension OS-KSADM * Fix invocations of TemplateError. This exception takes precisely three parameters, so I've added a fake location (0, 0) to keep it happy * Adding wadl for OS-KSCATALOG extension.Fixing existing xsds.Fixing service wadls. Merging changes. Change-Id: Id29dc19cbc89f47e21329e531fc33bd66c14cf61 * Update Nova and Glance paste config examples * Various documentation-related changes * Consolidating xsds. Splitting contrib to admin and service * Adding guides for groups extension * Fix host/port split code in authenticate_ec2. Resolves an AttributeError: 'Ec2Credentials' object has no attribute 'partition' exception that can occur for EC2 auth validations * Adding guide for RAX-KSKEY-service extension. Adding guide for OS-KSEC2-service extension * Fix NameError exceptions in add_credentials. Adds test case on creating credentials * Redefining credential types. 
Defining additional extensions and renaming extensions. Removed wadls that are not needed * Fix for duplicate tag on credentials.xsd * Move tools/tracer into the keystone code. Fixes ImportError's when running keystone as a .deb package * Fixed error where endpoints returned for tenant instead of token * Updated the AUTHORS file to test the new rpc script and workflow * Update rfc.sh to use 'true' * Made it possible to integrate with external LDAP * Dev guide rebuild and minor fixes * Updates to samples, XSDs, and WADLs * Added AUTHORS, .mailmap and generate_authors.sh * Changes to support endpoint template updates * Fixes bug 831574. Adds missing sys import * Updated schema to reflect id and name changes to Users and Tenants * Updated guides and samples * Additional contract changes * Sample changes * Atom links on Token * Cleanup service it endpoint catalog * Removed redundant function from base user api * Updated samples * Fixed reference to unassigned variable * Reworked XSDs and WADL to support auth and access elements * Remove more group stuff * Removed OSX files that shouldn't be in git * Documentation cleanups * Banished .DS_Store * Add rfc.sh for git review * Wrong common namespace * XSD & sample updates * Added more missing files to MANIFEST.in * hanges to allow test to work on python 2.6.* * Cleaned up come issues with python2.6 * Refactored manage.py to be both testable and useful for testing * Sample changes to support v2.0 api * Sample changes to support v2.0 api * Admin WADL Revisions * Add the files in keystone/test/etc * Add run_tests.* to the MANIFEST.in * Keystone manage.py cleanup * Tests running on in-memory sqlite db * Additional changes to fix minor service support stuff and increase test coverage. Also making validate token call available using service admin tokens * Made all sample data loading in one script * Minor fix to run_tests * Contract changes * Admin WADL updates * Port of glance-control to keystone. 
This will make writing certain keystone integration functional tests a little easier to do * Updates to XML and JSON changes for validateToken * Added pylint message count as run_tests.sh -l * Added reponse handling for xsd static file rendering III Extra extension tests (for RS-KEY) * Creating an artificial whitespace merge conflict * Moved run_test logic into abstract class * Git-ignore python coverage data * Added reponse handling for xsd static file rendering * Additional tests and minor changes to support services CRUD * Added reponse handling for xsd static file rendering * Schema updates. Split WADLs and extensions and got xsds to compile * Ziads changes and fixes for them * Added check_password to abstract backend user API * Doc changes, including service catalog xsd * Fixed service-bound roles implementation in LDAP backend * Removed ldap names import from fakeldap module * fix ec2 and add keystone-manage command for creating credentials * Legacy auth fix and doc, wadl, and xsd updates * Replacing tokens with the dummy tokens from sampledata.sh * Add option for running coverage with unit2 * Adding curl documentation and additional installation doc. Also updated man documentation for keystone-manage * Changes to improve performance * Removed the need to set PYTHONPATH before tests * Back to zero PEP8 violations * Schema and WADL updates * Adding documentation to WADL * Correct 401, 305, and www-authenticate responses * Correct 401, 305, and www-authenticate responses * Correct 401, 305, and www-authenticate responses * Added xsd content, update static controller, and static tests * Updated wadl * Fix LDAP requires to compatible version * Moved password check logic to backend * Changes to delete dependencies when services,endpoint_templates,roles are being deleted. 
PEP8 and Pylint fixes.Also do ldap related changes * Add LDAP schema * Add wrapper for real LDAP connection with logging and type converting * Fix console and debug logging * Redux: Add proper simple_bind_s to fakeldap * Adds support for authenticating via ec2 signatures * Changes to allow additional calls to support endpoint template CRUD and additional checks on existing method * Committer: Joe Savak * Refactoring business logic behind GET /tenants to make it less convoluted * Moved run_tests.py to match other projects * Revert "Add proper simple_bind_s to fakeldap, removed all imports from ldap." * Add proper simple_bind_s to fakeldap, removed all imports from ldap * Gets Keystone a bit more inline with the way that other OpenStack projects run tests. Basically, adds the standard run_tests.sh script, modifies the run_tests.py script to do the following: * Changes to support CRUD on services/roles * Issue #115: Added support for testing multiple keystone configurations (sql-only, memcache, ldap) * Added automatic test discovery to unit tests and removed all dead tests * PEP8 fixes... all of them * Small licensing change to test Gerrit * Small change to test Gerrit * Fix brain-o--we may not need project_ref, but we do need to create the project! 
* updated README with more accurate swift info * Determine is_admin based on 'Admin' role; remove dead project_ref code; pass auth_token into request context; pass user_id/project_id into request context instead of their refs * Added support for versioned openstack MIME types * #16 Changes to remove unused group clls * Add unittest2 to pip requires for testing * #66 Change in variable cases * #66 Change in variable cases * Changes to make cache time configurable * Changes to store tokens using memcache #66 * Changes suggested by Ziad.Adding validateToken operation * Flow diagram to support keystone service registration * Restored identity.wadl w/ system test * pylint fixes for role api * Removing attribute duplicated from superclass; causes an issue in py 2.7 * pylint fixes for tenant-group unit tests * pylint fixes for server unit tests * Making the API version configurable per API request * PEP8 fixes for system tests * Issue #13: Added support for Accept-appropriate 404 responses w/ tests for json & xml * Simple change to test gerrit * Document how to allow anonymous access * Sigh. Proofreading.. * Update README with instructions to fix segfault * These changes make no sense--I didn't do them, and I'm in sync! * Add middleware for glance integration * #3 Preventing creation of users with empty user id and pwds * Fixing naming conflict with builtin function next() * This makes the use of set_enabled more clear * Fixes failing test introduced after disabled check remove * Changes to allow password updates even when the user is disabled.Also fixed failing tests * Disabled users should now be returned by GET /users/{user_id} * Updating a disabled user (via xml) should now succeed * Updating a disabled user should now succeed * Noted potential issue, but I'm not sure if this is dead code or not anyway? 
* Assigned Base API classes so downstream code knows what to expect * Adding missing class variable declaration * Cleaning up unit tests * Removes disabled checks from get_user and update_user * Fixing module-level variable naming issues * Improving variable naming consistency * Avoiding overloading of built-in: type() * Fixing indentation * Specified python-ldap version, which appears to avoid the packaging issues we've experienced * Added missing import * More LDAP tweaks * LDAP backend updates * More test fixes * Fixed deprecation warning * Updated test to allow for additional role * Restored UnauthorizedFaults to token validation requests * Fix for issue #85 * - System test framework can now assert specific response codes automatically - Revised system test for issue #85 based on clarification from Ziad - Added system test to attempt admin action using a service token * Adds the member role to sampledata, gives it to joeuser * PEP8 fixes * Formatting * Merged duplicate code * Add first implementation of LDAP backend * Added (failing) system test for issue #13 * Minor cleanup * Made all API methods raise NotImplementedError if they are not implemented in backend * Made delete_all_endpoint calm if there is nothing to do * Fixed bug causing request body setting to fail * Add check to sqlalchemy backed to prevent loud crush * Tweaked import_module to clearly import module if it can * Removed hardcoded references to sql backends * Add exception throwing and logging to keystone-manage * Merging keystone.auth_protocols package into keystone.middleware * - Added 'automatic' admin authentication to KeystoneTestCase using bootstrapped user - Added system tests for admin & service authentication - Abstracted '/v2.0' path prefix away from system tests - Added simple uuid function to generate data for system tests (random number gen w/ seeds might work better?) 
- Refactored issue #85 tests with setUp & tearDown methods * Clarifying test case * Fixed minor pylint issues * Removed tenant id from admin user * Move dev guide to OpenStack * Commented out failing request, until it's review * Wrote test case for github issue #85 * Formatting change * Was this a typo or an incredibly lame joke? * Added missing imports and fixed a few pylint issues * Improved dict formatting * Improved readability a bit * Abstracted underlying HTTP behavior away from RestfulTestCase Added 'automatic' JSON body encoding (TODO: automatic XML encoding) Improved user-feedback on automatic response status assertion * Added run_tests.py to keystone.test.system, which uses bootstrap db script * Added bootstrap configuration script (with admin user assigned an Admin role) * Added 'automatic' token auth for each API * Refactored port configuration strategy to allow a single test case to address both the admin and service API's * Added automatic json/xml parsing to system test framework * Added system test discovery to run_tests.py * Added system tests for content type handling and url rewriting * Updated tests to reflect last bug fix * Extracted sample test from framework and moved system test framework into __init__ * Converted system test framework to use httplib * Initial system test approach, using urllib2 * Fixed bug: traceback thrown when the path '/' is requested * Updated *unused* tests to reflect refactored API's * Removed some useless/dead code * Cleaned up authentication tests * Improved readability slightly * Moved db imports to config module Removed useless try/except blocks * Organized imports * Simplified a few util functions * Fixed line length * Renamed service API configuration options * Renamed ServiceApi router module * Renamed ServiceApi router * Cleaned up keystone.logic * Removed unused logger * Refactored routers and controllers into their own modules (issue #44) * Fixed doc string * Improved PEP8 compliance * Fixed spelling * 
Removed unused import * Slightly simplified base wsgi router * Added note about run_tests.py to readme * Organized imports * Improved readme consistency * pep8 * Pylint an pep8 fixes * Fixing bug reported using with swift * Fixed default content type behavior (was defaulting to XML) * Removed redundant action mappings (for version controller) * Renamed exthandler to urlrewritefilter to better illustrate it's purpose * Minor comment change * Refactored URL extensions handling (for .json/.xml) Added universal support for optional trailing slashes * Return users in a tenant as part of a many-to-many relationship * Added import, autoformatting * Removed unused imports * Moved exthandler to keystone.middleware * ** keystone.conf refactoring ** * Fixed 'is_xml_response' function, which had no clear intention * Removed unused function * Rewrote .json/.xml extension handler with additional unit test * Added links to readme * Added python-ldap to pip-requires * Initialized LDAP backend * Various fixes for test running * Commented out suspicious unit tests..... 
* Added test automation script * Cleaned up file * Added missing test files to test collection * Made unit tests executable from the cmd line * Added test_auth to list of unit tests * Update auth test to account for generic service names * Changes to make Admin for keystone configurable.#27 * Remove old initializers * Changes to introduce BaseAPI to support multiple back ends * Changes to support dynamic loading of models * Adding list of todos * Initial changes to support multiple backends * Fixed identity.wadl response - issue #71# * Recompiled devguide with endpoints and templates * Removed unnecessary symlink * Changes to support endpoints and endpointemplates (renaming BaseUrls and BaseURLRefs) * Make swift middleware live where it should * Remove swift-y bits from generic token auth * Changes on Sample data * Code changes to support global endpointTemplates * Swift-specific middleware * Issue 31: Switching default ports to 5000/5001 (public/admin) * Fixed readme instructions for Nova - Issue #55 * Fixed requires for development and in readme * Bringing back the changes to support endpointTemplates and endpoints * Readme fix * Edited keystone/auth_protocols/nova_auth_token.py via GitHub * Issue 32: Updated readme to reflect fix for issue 32 (removed 'cd bin' prefixes before several commands) * Issue 32: bin/sampledata.sh cannot be executed outside of bin/ * Issue 32: ./bin/keystone cannot be executed outside of bin/ * Issue 31: Reverted ports to 8080/8081 while the issue is under discussion * Adding endpoint related files * Updated readme to reflect docs/ -> doc/ change Added tools/pip-requires-dev for depelopment dependencies * Basic authorization for swift * Republished developer guide for Jun 21, 2011 * Updated token validation sample xml (dev guide) * Updated dev guide publish date * Added developer guide build folder to git ignore list * Auto-formatted and syntacically validated every JSON example in the doc guide * working with dashboard * add 
get_tenants * rudimentary login working * most bits working * initial * Reverting change thats not needed * Fixing some of the failing tests * Merging changes from trunk * demo of membership using keystone in sampledata * Name changes BaseURLRefs to EndPoints and BaseURLs to EndpointTemplates * Fixed formatting, imports * Issue 31: Updated docs and examples * Committing unit test configuration for issue 31 * Issue 31: Changed default ports to 80/8080 * Issue #8: Renamed primary key of Token to 'id' * Name changes BaseURLRefs to EndPoints and BaseURLs to EndpointTemplates * Changes to hash password * Restored tools.tracer to bin/ scripts; included fix for empty frames * Merging changes * Removed unused import * Removed redundant sentence in dev guide * Removed unused imports in bin/ * Fix for keystone issue 41: https://github.com/rackspace/keystone/issues/41 * Merging changes from rackspace * Fixed spelling error * Changes to include support for paginations * Fixing existing methods on wadl * Fixed broken unit test code * Refactored api function names to avoid redundancy with new module names * Changes to wadl to support user operations * Refactored DB API into modules by model * Pep8 changes * Changes to allow user creation without a tenant * for got to change a 1.1 to 1.0 * dash needs both 1.0 and 1.1 compatability - need to fix that! 
* nova needs 1.0 api currently * Some field validations * Merged docs * make sampledata executable again * Admin for nova doesn't take a tenant * add keystone to its own service catalog * Fixed error on UrlExtensionFilterTest * Fixed imports; improved PEP8 formatting compliance * Fixed imports in keystone.common * Removed unused imports and denoted unused variables * Fixed imports in auth_protocols * Removed duplicated function * Added coverage to pip development requirements * Fixed relative & unused imports * Adding py init to functional tests * Created pip requirements file for development env (added sphinx python doc generation to start) * Added pydev files to gitignore * Added py init files to directories already being referenced as modules * Users must have tenants or nova breaks * Doc updates and dev requires * Resolved conflicts * To PUT or to POST * Fixed v1.0 auth test to account for cdn baseURL order * Support for GET /v2.0/users and add cdn back to sampledata for v1.0 support * Update the baseURL data pushed into glance * Fix symlinks after docs -> doc rename * Adding call to modify tenant.Adding more tests and fixing minor issue * Added pip requirements file for testing environments * Grammar corrections * Adds Sphinx build ability and RST documentation * Removing unused references to UserTenantAssociation * Introduced a method to get all users @Users resource.Also moved the method to get user groups out of tenant scope * Changed BaseURLs to OpenStack names * Test fixes * Seperating user calls from tenants * Improved README formatting/consistency * Updated paths to unit/function tests in README * Updated docs: sampledata.sh can't be executed outside of bin/ * Added Routes and httplib2 to production dependencies * Correcting typo * Setup.py fix * Readd test folder * Forgot to add doc file * Moved tests to keystone folder and removed old management tools - issue #26 * Updated SWIFT endpoint default * Update to dev guide explaining admin call auth 
requirements * Update sample data and keystone-manage for local install of OpenStack * Put updated Swift Quickstart into README.md * API v2.0 Proposal * Doc updates.Minor keyston-manage changes * Doc updates * Doc updates * set nova admin role if keystone user has "Admin" role * keystone repo is now at github.com/rackspace/keystone * Add success test for GET /v2.0/tokens/ in json and xml * Add Admin API tests for v2 authentication * Add test verifying a missing tenantId key in the password creds works properly in JSON * Rename file.Ziad suggestion * Name changes suggested by Ziad * Minor fixes * Code cleanup * PEP8 changes * Removing redundant files * Changing to legacy auth to standard wsgi middleware.Name change of some of the files * Changing to legacy auth to standard wsgi middleware * Introducing new frontend component to handle rackspace legacy calls * Introducing new frontend component to handle rackspace legacy calls * keystone repo is now at github.com/rackspace/keystone * Add success test for GET /v2.0/tokens/ in json and xml * Add Admin API tests for v2 authentication * Add test verifying a missing tenantId key in the password creds works properly in JSON * Removing debug print * Changes to return service urls for Auth1.0 style calls * Changes to return service urls for Auth1.0 style calls * Updating tests and sample data * Merging changes from rackspace * Changes to support service catalog * pep8 * Added URLs to sampledata * Support for listing BaseURL refs in keystone-manage * Support transforming service catalog * Removing remerged comments * Adding roles as comma seperated values on a single header * Changes to support getTenants call for user with admin privelage and regular user * Add more test cases for v2 authentication for bad requests and unauthorized results * Add test case for verifying GET /v2.0/tokens returns 404 Not Found * It's possible to authenticate through the Admin API * Changes on auth basic middleware component to return roles.Also 
changes on the application to return roles not tied to a tenant * Update the sample to reflect some minor enhancements to the base framework * Add test for validate_token * Save expiration data for later comparison * Don't need to fiddle around with user tokens here, just admin tokens * Get and revoke both admin and user tokens.. * Merging changes * Bah, somehow my sample data failed to include Admin as admin's role * Merging changes * Merging changes * Merging changes * Meging changes * Changes to also return role references as a part of user when get token call is made for a specific tenant * Use un-spaced exception names.. * Try to use an admin credential to revoke the token * Split the Keystone service from the Admin service so we can test both * The API is a moving target; update the test * Support for listing roles in keystone-manage * Adds unit testing base class that takes care of much of the tedium around setting up test fixtures. This first commit just demoes the new test case functionality with a new test case /test/unit/test_authn_v2.py * pep8 * Fixed issue #6 * Support POST /tokens only - issue #5 * Added quick start guide to integrating Swift and Keystone; fixed setup.py tokenauth filter installation * Added role and user data to sampledata.sh * Additional unit tests for base url refs.Minor code refactorings * Changes to support baseurlrefs operations * MD cleanup * md futzing * More readme cleanup * Merged DTest tests and moved ini file to examples/paste * moved paste example to examples * Readme updates * Just making sure leading whitespace is stripped if automated * to->too * Updated dev guide * Add a sample to document how to create tests * Add a test for authenticate/revoke_token * Ensure that --username, --password, and --keystone are given * Build base classes for tests * Documentation fixes to versions * Build the skeleton necessary to run tests * Add x_auth_token header to most methods * Make sure we don't lose the body completely if we can't 
json.load() it * Add debugging messages * Add a property to get the RESTClient instance * Fix up get()/put()/post()/delete() calls to make_req() * Deal with the case that no headers are provided * Deal more intelligently with empty strings * Listing technologies to integrate * Um, queries are supposed to be optional, all others required * Properly join relative paths * Apparently "/token" is actually spelled "/tokens" * Accidentally left out the reqwrapper argument * Sketch in a basis for the Keystone API 2.0 * Make argument order a little more natural * Fixing unit tests.Introduced support for global roles * Don't let self._path be the empty string * self._scheme isn't set yet * Don't add a field if there isn't one.. * Create a simple means of building a REST-based API * Fixing unit tests for user and groups * Docs * Link fix * API Spec updates * More /token -> /tokens fixes * /tokens instead of /token * Prep for move to git@github.com:rackspace/keystone.git * Made URL relative * pep-8 and minor mapping fix * Dev guide update - BaseURLs and Roles * Update docs on how to use nova.sh to deploy openstack on cloud servers * Changes to support calls to getBaseUrls * Changes to support /tokens on docbook and minor roleref changes * Changes to support roleref calls * Updated to use X_USER as decided in Issue 49 * Updated with feedback from https://github.com/khussein/keystone/issues/49#issuecomment-1237312 * Fix for issue 49 - parse X_AUTHORIZATION header for user_id * Fixed issue where user tenant not returned in GET /token - related to issue #49 * user should be what keystone returns * Fixed issue #54 * Updated to use X_USER as decided in Issue 49 * Updated with feedback from https://github.com/khussein/keystone/issues/49#issuecomment-1237312 * Fix for issue 49 - parse X_AUTHORIZATION header for user_id * Minor changes to the document * Changes to unique relationship definition * Adding more tests for roleref operations * Fixed issue where user tenant not returned in 
GET /token - related to issue #49 * Changes to support /tokens on docbook and minor roleref changes * Changes to support roleref calls * user should be what keystone returns * midnight typo * Added examples readme * Fixed issue #54 * Link to latest dev guide in readme * Instructions to run with Nova * Documentation update and new API spec * Updates to README * Updates to README * Updates to README * Updates to README * Updates to README * Updates to README * Fix up broken setup.py scripts list * -Removed .project file from project and added it to .gitignore -Moved pylintrc -> .pylintrc, personal preference that this file should be available, but not seen -Moved echo to examples directory, seemed a bit odd to be in the top level -Moved management directory to tools, seemed a bit odd to be in the top level -Moved pip-requires to tools/, and updated the reference to it in README.md * Fix the identity.wadl symlink * keystone src directory needs symlinked * remove copy&paste ware from nova_auth_token and use auth_token middleware * Flow diagrams * simple flow diagrams * Multi-tenant token fixes * Fixed invalid tenant authentication * Fix error in tenant_is_empty (model has changed) * Fixed debug/verbose flag processing * update readme * keep nova_auth_token in keystone * Changes to support /Roles calls.Removing create call from being exposed as of now * Changes to support /Roles calls.Description included * Changes to support /Roles calls * Readme merge * Readme updaes for load testing * hack nova_auth_token to work * removing unused library * Changes to support roles and baseurls on wadl * Changes to support roles and baseurls on wadl * Changes to support roles and baseURLs * missed some nova reqs * information on using nova_auth_token * lazy provisioning for nova * readme fixes * Merged in anotherjesse's changes * New model working with echo_client.py * Missed a file * Added tracing and modified model * echo_client should be executable * move nova's path injection to 
management scripts * server.py/version.py shouldn't be executable while cli tools should * spacing for readme * Add keystone-manage to support bootstrapping Keystone with add user command * Setup.py update * Updated logging and parameterization for bin scripts * Minor readme fixes * Simplified running Keystone and Updated readme * v1 compatibility and Service/Admin API split * DocBook Changes * Merging HCL changes - pull 40 * Changes to support baseurls and roles on the document.Adding sample files * Changes to support baseurls and roles on the document * Adding xsds to support roles and baseurls * More version fixes * Initial commit * Make config compatible with legacy * Move to v2.0 * Changes to move the db settings to conf file * removing bottle * Adding Accept header to is_xml_response logic * Removing bottle dependencies * Mae Pylintrc, reordered imports made pep8 of the files * Foundation for some server and auth unit tests * Added as per HACKING Files * pylint fixes * fixes * fixed test cases * Merged api,service,server,test_common * Added test cases for add user to a tenanat * multi token test cases and bug fixes * Moved all Server functions to utils.py * Fixed failing test - bug introduced in cleanup * Added pylint and cleanup from last commit * Merged pull 37. 
Removes bottle, adds configuration, and adds daemonization * fixed pylint * fixed bugs * fixes * fixes * removed backslashes * Added functionality add user to a tenant * fixes * Pep8 test_users.py * checking SSLv3 problems * checking SSLv3 problems * checking SSLv3 problems * checking git push problems * Optimised test_users.py * Modified the README and README.md * fixed bug raised when included exthandler * Removed unwanted file * removed unused run method * Added PEP8 to test cases * Removed importing objects from keystone * pylintrc optimization * optimization of test cases and handling multi token * fixes * Nochanges * Modified the README for keystone-control issue * Modified the README * Added PEP8 for remaining test cases * PEP8 for test cases by praveena * renamed test_identity.py to test_keystone * added pidfile and removed print statement from test_common * fixes * removed print statement * Added keystone.log to ignore list * Modified server.py tenant group URL to fix failing test cases * Added *.log to gitignore * neglect changes * Added new script to run all tests * Modified and tests. 
Tests groups throwing some minor errors still * Modified and commented the code * Split the test cases into individual files Fixed Bugs of api * Made PEP8 of server * Too much of duplication and incomplete conflict resolution in test_identity.py * Sisirhs changes * Sai and Praveena's Changes * Added missing tests, mad e enable and disable password work * merged conflicts * test cases modfications and bug fixes * Renamed to server.py and added top dir in config * Added the keystone top dir in configuration * Modified the README * latest updates * latest updates * new merge with installation fixes * A brief README for the auth-server * Added keystone-control * chasing tenant group bug * Added tests for the URL extension middleware * modified keystone-control and reshuffling of file names * Adding unit test for the URL extension handler * Modified test cases * Yes, I modified, but I wont commit * merged Sai changes * Installation of keystone done * corrects charset=utf=8 * Working on echo server * one more push * move the template code from bottle into a separate file: * modified auth_server.py * Added echod and renamed echo.py to server.py * Minor cleanup + pep8 * merging changes from sai branch * saving changes to auth_server.py * get version implementation s Please enter the commit message for your changes. 
Lines starting * get_version_info is still not working * in the middle of get_version_info * Modified test_identity * removed .auth.serve.py.swp * Added some more functions through Routes and mapper * Update for Abdul * My Changes part 2 * modified Resposne to resp=Response() * My Changes * minor tweak * Some more cleaning up of git merges * Cleaning up of git merges * Added glance type of eventlet, because of its plug and play which meets the need of running everything independently if needed * pep8 and fixes * Readme updates * Removed keystone.db - should be generated by ORM * Removed extra files from last commit * Removed Global groups tests, which still needs to be tested. Updated README on how to run unit test * Deleted keystone.db * Merged pagination * Git problems - lingering commit * Renamed identity.py to server.py and added bin directory * Adding router to requires. Updating standards in HACKING. Removing schema (generated from ORM) * Added pagination functionality and tenant_group functionality with unit tests * Removing unused imports * Removing unused function * unwanted file * added the code that would go to hussein repo * Added tenant groups in identity, created test cases for tenant groups * Added latest changes to sirish branch with pagination for get tenants * Annotate TODOs * argument handling in echo.py * getting pep8-y with it * Merged conflicts * Basic auth and refactor * more pep8 * testing merging * get _tenants pagination updates * Merging keystone code * Basic Auth support * 17: query extension works * Issue 17: Adding tests * removed \r chararcter from unit directory * removed windows newline characters from management folder * removed unwanted files * Adding First kestone repo * Add Description File * sai added by sai * Foo2 * Foo * Initial * Minor changes + call using WSGI instead of bottle * Restored remoteauth * Reverted accidental(?) 
WADL deletion >:-( * Renamed protocol modules to auth_[type] Renamed PAPIAuth to RemoteAuth - better documented it and added redirect to auth_token (to stop using this) Cleaned up ini files and ini file handling (removed hard-coded defaults) * simple json cleanups for tests * pep8-ize * Added protocol stubs (openid and basic auth) * Renamed delegated to 'delay_auth_decision' Remove PAPIAuth Rename folder to Auth_protocols (that is where we add protocol components)Get_request -> get_content Make protocol module more generic (prepare for superclassing and multiple protocol support Refactor Auth_protocol_token If no token, bail out quick (clearer) same with if app Break out headers: - here is what is coming in - here is what we add - explain the X in headers: extended header * Updated Readme, and added TODO * Added XML/Json tests to the identity and updated the README * Fixed issue with standalone install * Updated readme * Fixed remote proxy issue * draft remote proxy: needs fixing * Updated readme and echo_client * Adding remote echo ini file * Fixes to middleware, ini parameters, and support for running echo remotely * replaced localhost with config * modifide middleware; echo_client works * Fixing and documenting middleware * Merged pull request #30 from cloudbuilders/master * Updated management scripts to use SQLAlchemy * Fixed SQLAlchemy db location to keystone directory * Added unit tests and updated the README.md on how to run it * made echo test work * get_request is actually init model from request contents * missed simplejson assumption * finish removing simplejson * pythonizing * update fault to be pythonic * remove unpythonic properties from atom and tenant * error decorator and logging unhandled errors * missed auth_data * fix typos * more pythonic * we don't need properties yet * use string formating * use relative import in init * fixed paste configs to run without eggs * Fixed mistake in port for echo service * Added echo_client.py * keystone.db 
should be in keystone dir * pep8 / whitespace * gitignore pyc files * split out running and installing sections in readme * allow apps to be run without setup.py * add command for test database to readme * echo has a separate setup.py * httplib2 isn't used * spacing * add httplib2 to deps and sort them * Added pip-requires and updated readme to include missing deps * explict installs for python libraries * update readme formating * update readme to be markdown * Updated readme * Doc fixes * Friendly error message if a user is not associated with a tenant * Ensure schema complience assertion is on in all tests * Whoops, details element is optional in faults * Remove identity (1) stuff and renamed identity2 to identity * Added wadl and xsd contract links * Adjust reletive links in schema * Comment seperators * Init version links * Initial version support * Initial extensions support * Initial update tenant * Make sure we don't delete non-empty tenants * Initial delete tenant * Initial getTenant * Minor updates to tests * Initial implementation of get tenants * added unit tests in test/unit/test_keystone.py * Initial create tenant * Minor bug when serializing tenant to JSON * Schema update * Whoops forgot 409 in JSON as well! * Whoops missed 409 on create tenant * setup.py fix * Minor fixes * pep-8 cleanup of model * More pep-8 cleanup * Minor fixes * Some pep-8 cleanup * Initial revoke token * Initial support for authenticate * Whoops, bad user data * Initial working validate token * Whoops need to convert datetimes to iso format * Test updates * tokenId should not be a string! 
* Cleaned up validate token call * Full check admin token with soap ui tests * Some SQL testing scripts * Initial check admin token from db * made identity.py pep8 compliant * Better error handling * Initial full response to authenticate token, still having issues with errors * Stubb for token calls * Initial prototype of default token based auth protocol * Initial deserialization of tenant * Initial deserialization of password credentials * SQL Alchemy additions: Token * SQL Alchemy additions * Whoops pep8 * Output serialization of faults * XML and JSON rendering on tenant/s * Translations of auth to XML and JSON * Sample service.py with sqlalchemy * Fixed relative path issue * sqlalchemy draft * Initial service.py * Cleaned up setup.py * Added collections * Initial atom link type * Initial fault type * Initial tenant type * PEP-8 for echo.py * Initial auth types * Readme update * Fixed identity.py and some styling * Minor updates * Keystone WSGI and eventlet * Corrected how to run echo service * Replaced paster with eventlet for echo service * Added create tables in README and modified keystone.db to reflect the new schema * Merged identity functions second time * Sync * Whoops should have never checked this in * all management files except user add and delete from group * Management files except for add/delete user from group * Updated README * Setup PasteDeploy and configured PAPIAuth * reorganization of files * Add SOAPUI projects * Resolved Conflicts * Removed Conflicts * dos2unix * Deleted IDE files * Importing from DevTeam * Import from DevTeam * updates DevTeam * Code by Dev Team * Added Power API Auth Middleware * removed unused libraries * Dev Team: validate_token , create_user ( created for test purpose) and update_tenant * Added to README * Fixed bug in echo.py * Whoops forgot auth header * Instructions for soapUI * Add WADL links for convenience * Initial work into paste deploy...commen out for now * Added echo.wadl * Fixed for case with missing 
accept header * Added content nagotiation * Use XSL to convert * Better quote handling * Add JSON transform * Whoops samples don't match * XSD for echo service * Initial echo service * Updates to identity.py and README * Added X-Auth-Token * Added extensions * Updated errors for extension requests * Added getTenant, updateTenant, deleteTenant * Added get and create tenants * Initial WADL with token operations * Added faults * Remove refrences to usernameConflict and groupConflict * Added common extensions * Added api.xsd schema index * Added XSD 1.1 and atom linking support * Made the tenant xsd extensible * Initial tenant xsd * Made the token schema extensible * Initial token schema * Groups should have ids instead of names? * Added Creating Tenants, JSON only * Remove mention of service catalog * Updated samples * Updated pubdate * Updates to intro section * Updated concepts * Better entities in document * Removed init section from docs, we'll get to them later * Added Dependencies section * Added License & Create/Delete user management CLI * Initial docs import * Created DB with users table, simple schema * first commit keystone-9.0.0/PKG-INFO0000664000567000056710000000463012701407246015625 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: keystone Version: 9.0.0 Summary: OpenStack Identity Home-page: http://docs.openstack.org/developer/keystone/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: ================== OpenStack Keystone ================== Keystone provides authentication, authorization and service discovery mechanisms via HTTP primarily for use by projects in the OpenStack family. It is most commonly deployed as an HTTP interface to existing identity systems, such as LDAP. 
Developer documentation, the source of which is in ``doc/source/``, is published at: http://docs.openstack.org/developer/keystone/ The API specification and documentation are available at: http://specs.openstack.org/openstack/keystone-specs/ The canonical client library is available at: https://git.openstack.org/cgit/openstack/python-keystoneclient Documentation for cloud administrators is available at: http://docs.openstack.org/ The source of documentation for cloud administrators is available at: https://git.openstack.org/cgit/openstack/openstack-manuals Information about our team meeting is available at: https://wiki.openstack.org/wiki/Meetings/KeystoneMeeting Bugs and feature requests are tracked on Launchpad at: https://bugs.launchpad.net/keystone Future design work is tracked at: http://specs.openstack.org/openstack/keystone-specs/#identity-program-specifications Contributors are encouraged to join IRC (``#openstack-keystone`` on freenode): https://wiki.openstack.org/wiki/IRC For information on contributing to Keystone, see ``CONTRIBUTING.rst``. Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 keystone-9.0.0/rally-jobs/0000775000567000056710000000000012701407246016603 5ustar jenkinsjenkins00000000000000keystone-9.0.0/rally-jobs/README.rst0000664000567000056710000000032412701407102020260 0ustar jenkinsjenkins00000000000000This directory contains rally benchmark scenarios to be run by OpenStack CI. 
* more about rally: https://wiki.openstack.org/wiki/Rally * how to add rally-gates: https://wiki.openstack.org/wiki/Rally/RallyGates keystone-9.0.0/rally-jobs/keystone.yaml0000664000567000056710000000527112701407102021324 0ustar jenkinsjenkins00000000000000--- KeystoneBasic.create_user: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_delete_user: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_and_list_users: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_user_update_password: - args: password_length: 10 runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_and_list_tenants: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.get_entities: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.add_and_remove_user_role: - runner: type: "constant" times: 100 concurrency: 10 context: users: tenants: 5 users_per_tenant: 4 sla: failure_rate: max: 0 KeystoneBasic.create_and_delete_role: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_add_and_list_user_roles: - runner: type: "constant" times: 100 concurrency: 10 context: users: tenants: 5 users_per_tenant: 4 sla: failure_rate: max: 0 KeystoneBasic.create_tenant: - runner: type: "constant" times: 50 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_tenant_with_users: - args: users_per_tenant: 10 runner: type: "constant" times: 50 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_update_and_delete_tenant: - runner: type: "constant" times: 50 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_and_delete_service: - runner: type: "constant" times: 50 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_and_list_services: - runner: type: "constant" 
times: 50 concurrency: 10 sla: failure_rate: max: 0 keystone-9.0.0/MANIFEST.in0000664000567000056710000000073112701407102016253 0ustar jenkinsjenkins00000000000000include AUTHORS include babel.cfg include ChangeLog include CONTRIBUTING.rst include LICENSE include HACKING.rst include README.rst include openstack-common.conf include run_tests.sh include setup.cfg include setup.py include tox.ini include etc/* include httpd/* graft bin graft doc graft keystone/tests graft tools graft examples recursive-include keystone *.json *.xml *.cfg *.pem README *.po *.pot *.sql global-exclude *.pyc *.sdx *.log *.db *.swp keystone/tests/tmp/* keystone-9.0.0/tox.ini0000664000567000056710000000773512701407105016046 0ustar jenkinsjenkins00000000000000[tox] minversion = 1.6 skipsdist = True envlist = py34,py27,pep8,docs,genconfig,releasenotes [testenv] usedevelop = True install_command = pip install -U {opts} {packages} setenv = VIRTUAL_ENV={envdir} deps = -r{toxinidir}/test-requirements.txt .[ldap,memcache,mongodb] commands = find keystone -type f -name "*.pyc" -delete bash tools/pretty_tox.sh '{posargs}' whitelist_externals = bash find passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY PBR_VERSION [testenv:py34] commands = find keystone -type f -name "*.pyc" -delete bash tools/pretty_tox_py3.sh [testenv:legacy_drivers] deps = -r{toxinidir}/test-requirements.txt nose .[ldap,memcache,mongodb] commands = # Run each legacy test separately, to avoid SQL model redefinitions find keystone -type f -name "*.pyc" -delete nosetests -v \ keystone/tests/unit/backend/legacy_drivers/assignment/V8/sql.py nosetests -v \ keystone/tests/unit/backend/legacy_drivers/role/V8/sql.py nosetests -v \ keystone/tests/unit/backend/legacy_drivers/federation/V8/api_v3.py nosetests -v \ keystone/tests/unit/backend/legacy_drivers/resource/V8/sql.py [testenv:pep8] deps = .[bandit] {[testenv]deps} commands = flake8 # Run bash8 during pep8 runs to ensure violations are caught by # the check and 
gate queues bashate examples/pki/gen_pki.sh # Check that .po and .pot files are valid. bash -c "find keystone -type f -regex '.*\.pot?' -print0| \ xargs -0 -n 1 msgfmt --check-format -o /dev/null" # Run security linter bandit -r keystone -x tests [testenv:bandit] # NOTE(browne): This is required for the integration test job of the bandit # project. Please do not remove. deps = .[bandit] commands = bandit -r keystone -x tests [testenv:cover] commands = find keystone -type f -name "*.pyc" -delete python setup.py testr --coverage --testr-args='{posargs}' [testenv:venv] commands = {posargs} [testenv:debug] commands = find keystone -type f -name "*.pyc" -delete oslo_debug_helper {posargs} passenv = KSTEST_ADMIN_URL KSTEST_ADMIN_USERNAME KSTEST_ADMIN_PASSWORD KSTEST_ADMIN_DOMAIN_ID KSTEST_PUBLIC_URL KSTEST_USER_USERNAME KSTEST_USER_PASSWORD KSTEST_USER_DOMAIN_ID KSTEST_PROJECT_ID [testenv:functional] basepython = python3.4 deps = -r{toxinidir}/test-requirements.txt setenv = OS_TEST_PATH=./keystone/tests/functional commands = find keystone -type f -name "*.pyc" -delete python setup.py testr --slowest --testr-args='{posargs}' passenv = KSTEST_ADMIN_URL KSTEST_ADMIN_USERNAME KSTEST_ADMIN_PASSWORD KSTEST_ADMIN_DOMAIN_ID KSTEST_PUBLIC_URL KSTEST_USER_USERNAME KSTEST_USER_PASSWORD KSTEST_USER_DOMAIN_ID KSTEST_PROJECT_ID [flake8] filename= *.py,keystone-all,keystone-manage show-source = true # D100: Missing docstring in public module # D101: Missing docstring in public class # D102: Missing docstring in public method # D103: Missing docstring in public function # D104: Missing docstring in public package # D105: Missing docstring in magic method # D202: No blank lines allowed after docstring. # D203: 1 blank required before class docstring. # D205: Blank line required between one-line summary and description. # D400: First line should end with a period. # D401: First line should be in imperative mood. 
ignore = D100,D101,D102,D103,D104,D105,D203,D205,D400,D401 exclude=.venv,.git,.tox,build,dist,doc,*openstack/common*,*lib/python*,*egg,tools,vendor,.update-venv,*.ini,*.po,*.pot max-complexity=24 [testenv:docs] commands= bash -c "rm -rf doc/build" bash -c "rm -rf doc/source/api" python setup.py build_sphinx [testenv:releasenotes] commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:genconfig] commands = oslo-config-generator --config-file=config-generator/keystone.conf [hacking] import_exceptions = keystone.i18n six.moves local-check-factory = keystone.tests.hacking.checks.factory keystone-9.0.0/keystone/0000775000567000056710000000000012701407246016366 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/v2_crud/0000775000567000056710000000000012701407246017732 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/v2_crud/__init__.py0000664000567000056710000000000012701407102022020 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/v2_crud/user_crud.py0000664000567000056710000001200312701407102022262 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import uuid from oslo_log import log from keystone.common import dependency from keystone.common import extension from keystone.common import wsgi from keystone import exception from keystone import identity from keystone.models import token_model LOG = log.getLogger(__name__) extension.register_public_extension( 'OS-KSCRUD', { 'name': 'OpenStack Keystone User CRUD', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-KSCRUD/v1.0', 'alias': 'OS-KSCRUD', 'updated': '2013-07-07T12:00:0-00:00', 'description': 'OpenStack extensions to Keystone v2.0 API ' 'enabling User Operations.', 'links': [ { 'rel': 'describedby', 'type': 'text/html', 'href': 'http://developer.openstack.org/' 'api-ref-identity-v2-ext.html', } ]}) @dependency.requires('catalog_api', 'identity_api', 'resource_api', 'token_provider_api') class UserController(identity.controllers.User): def set_user_password(self, context, user_id, user): token_id = context.get('token_id') original_password = user.get('original_password') token_data = self.token_provider_api.validate_token(token_id) token_ref = token_model.KeystoneToken(token_id=token_id, token_data=token_data) if token_ref.user_id != user_id: raise exception.Forbidden('Token belongs to another user') if original_password is None: raise exception.ValidationError(target='user', attribute='original password') try: user_ref = self.identity_api.authenticate( context, user_id=token_ref.user_id, password=original_password) if not user_ref.get('enabled', True): # NOTE(dolph): why can't you set a disabled user's password? raise exception.Unauthorized('User is disabled') except AssertionError: raise exception.Unauthorized() update_dict = {'password': user['password'], 'id': user_id} admin_context = copy.copy(context) admin_context['is_admin'] = True super(UserController, self).set_user_password(admin_context, user_id, update_dict) # Issue a new token based upon the original token data. This will # always be a V2.0 token. 
# TODO(morganfainberg): Add a mechanism to issue a new token directly # from a token model so that this code can go away. This is likely # not the norm as most cases do not need to yank apart a token to # issue a new one. new_token_ref = {} metadata_ref = {} roles_ref = None new_token_ref['user'] = user_ref if token_ref.bind: new_token_ref['bind'] = token_ref.bind if token_ref.project_id: new_token_ref['tenant'] = self.resource_api.get_project( token_ref.project_id) if token_ref.role_names: roles_ref = [dict(name=value) for value in token_ref.role_names] if token_ref.role_ids: metadata_ref['roles'] = token_ref.role_ids if token_ref.trust_id: metadata_ref['trust'] = { 'id': token_ref.trust_id, 'trustee_user_id': token_ref.trustee_user_id} new_token_ref['metadata'] = metadata_ref new_token_ref['id'] = uuid.uuid4().hex catalog_ref = self.catalog_api.get_catalog(user_id, token_ref.project_id) new_token_id, new_token_data = self.token_provider_api.issue_v2_token( token_ref=new_token_ref, roles_ref=roles_ref, catalog_ref=catalog_ref) LOG.debug('TOKEN_REF %s', new_token_data) return new_token_data class Router(wsgi.ComposableRouter): """Provides a subset of CRUD operations for internal data types.""" def add_routes(self, mapper): user_controller = UserController() mapper.connect('/OS-KSCRUD/users/{user_id}', controller=user_controller, action='set_user_password', conditions=dict(method=['PATCH'])) keystone-9.0.0/keystone/v2_crud/admin_crud.py0000664000567000056710000002101012701407102022372 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import assignment from keystone import catalog from keystone.common import extension from keystone.common import wsgi from keystone import identity from keystone import resource extension.register_admin_extension( 'OS-KSADM', { 'name': 'OpenStack Keystone Admin', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-KSADM/v1.0', 'alias': 'OS-KSADM', 'updated': '2013-07-11T17:14:00-00:00', 'description': 'OpenStack extensions to Keystone v2.0 API ' 'enabling Administrative Operations.', 'links': [ { 'rel': 'describedby', 'type': 'text/html', 'href': 'http://developer.openstack.org/' 'api-ref-identity-v2-ext.html', } ]}) class Router(wsgi.ComposableRouter): """Previously known as the OS-KSADM extension. Provides a bunch of CRUD operations for internal data types. 
""" def add_routes(self, mapper): tenant_controller = resource.controllers.Tenant() assignment_tenant_controller = ( assignment.controllers.TenantAssignment()) user_controller = identity.controllers.User() role_controller = assignment.controllers.Role() assignment_role_controller = assignment.controllers.RoleAssignmentV2() service_controller = catalog.controllers.Service() endpoint_controller = catalog.controllers.Endpoint() # Tenant Operations mapper.connect( '/tenants', controller=tenant_controller, action='create_project', conditions=dict(method=['POST'])) mapper.connect( '/tenants/{tenant_id}', controller=tenant_controller, action='update_project', conditions=dict(method=['PUT', 'POST'])) mapper.connect( '/tenants/{tenant_id}', controller=tenant_controller, action='delete_project', conditions=dict(method=['DELETE'])) mapper.connect( '/tenants/{tenant_id}/users', controller=assignment_tenant_controller, action='get_project_users', conditions=dict(method=['GET'])) # User Operations mapper.connect( '/users', controller=user_controller, action='get_users', conditions=dict(method=['GET'])) mapper.connect( '/users', controller=user_controller, action='create_user', conditions=dict(method=['POST'])) # NOTE(termie): not in diablo mapper.connect( '/users/{user_id}', controller=user_controller, action='update_user', conditions=dict(method=['PUT'])) mapper.connect( '/users/{user_id}', controller=user_controller, action='delete_user', conditions=dict(method=['DELETE'])) # COMPAT(diablo): the copy with no OS-KSADM is from diablo mapper.connect( '/users/{user_id}/password', controller=user_controller, action='set_user_password', conditions=dict(method=['PUT'])) mapper.connect( '/users/{user_id}/OS-KSADM/password', controller=user_controller, action='set_user_password', conditions=dict(method=['PUT'])) # COMPAT(diablo): the copy with no OS-KSADM is from diablo mapper.connect( '/users/{user_id}/tenant', controller=user_controller, action='update_user', 
conditions=dict(method=['PUT'])) mapper.connect( '/users/{user_id}/OS-KSADM/tenant', controller=user_controller, action='update_user', conditions=dict(method=['PUT'])) # COMPAT(diablo): the copy with no OS-KSADM is from diablo mapper.connect( '/users/{user_id}/enabled', controller=user_controller, action='set_user_enabled', conditions=dict(method=['PUT'])) mapper.connect( '/users/{user_id}/OS-KSADM/enabled', controller=user_controller, action='set_user_enabled', conditions=dict(method=['PUT'])) # User Roles mapper.connect( '/users/{user_id}/roles/OS-KSADM/{role_id}', controller=assignment_role_controller, action='add_role_to_user', conditions=dict(method=['PUT'])) mapper.connect( '/users/{user_id}/roles/OS-KSADM/{role_id}', controller=assignment_role_controller, action='remove_role_from_user', conditions=dict(method=['DELETE'])) # COMPAT(diablo): User Roles mapper.connect( '/users/{user_id}/roleRefs', controller=assignment_role_controller, action='get_role_refs', conditions=dict(method=['GET'])) mapper.connect( '/users/{user_id}/roleRefs', controller=assignment_role_controller, action='create_role_ref', conditions=dict(method=['POST'])) mapper.connect( '/users/{user_id}/roleRefs/{role_ref_id}', controller=assignment_role_controller, action='delete_role_ref', conditions=dict(method=['DELETE'])) # User-Tenant Roles mapper.connect( '/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}', controller=assignment_role_controller, action='add_role_to_user', conditions=dict(method=['PUT'])) mapper.connect( '/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}', controller=assignment_role_controller, action='remove_role_from_user', conditions=dict(method=['DELETE'])) # Service Operations mapper.connect( '/OS-KSADM/services', controller=service_controller, action='get_services', conditions=dict(method=['GET'])) mapper.connect( '/OS-KSADM/services', controller=service_controller, action='create_service', conditions=dict(method=['POST'])) mapper.connect( 
'/OS-KSADM/services/{service_id}', controller=service_controller, action='delete_service', conditions=dict(method=['DELETE'])) mapper.connect( '/OS-KSADM/services/{service_id}', controller=service_controller, action='get_service', conditions=dict(method=['GET'])) # Endpoint Templates mapper.connect( '/endpoints', controller=endpoint_controller, action='get_endpoints', conditions=dict(method=['GET'])) mapper.connect( '/endpoints', controller=endpoint_controller, action='create_endpoint', conditions=dict(method=['POST'])) mapper.connect( '/endpoints/{endpoint_id}', controller=endpoint_controller, action='delete_endpoint', conditions=dict(method=['DELETE'])) # Role Operations mapper.connect( '/OS-KSADM/roles', controller=role_controller, action='create_role', conditions=dict(method=['POST'])) mapper.connect( '/OS-KSADM/roles', controller=role_controller, action='get_roles', conditions=dict(method=['GET'])) mapper.connect( '/OS-KSADM/roles/{role_id}', controller=role_controller, action='get_role', conditions=dict(method=['GET'])) mapper.connect( '/OS-KSADM/roles/{role_id}', controller=role_controller, action='delete_role', conditions=dict(method=['DELETE'])) keystone-9.0.0/keystone/assignment/0000775000567000056710000000000012701407246020536 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/assignment/backends/0000775000567000056710000000000012701407246022310 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/assignment/backends/__init__.py0000664000567000056710000000000012701407102024376 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/assignment/backends/sql.py0000664000567000056710000003174512701407102023462 0ustar jenkinsjenkins00000000000000# Copyright 2012-13 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import assignment as keystone_assignment from keystone.common import sql from keystone import exception from keystone.i18n import _ class AssignmentType(object): USER_PROJECT = 'UserProject' GROUP_PROJECT = 'GroupProject' USER_DOMAIN = 'UserDomain' GROUP_DOMAIN = 'GroupDomain' @classmethod def calculate_type(cls, user_id, group_id, project_id, domain_id): if user_id: if project_id: return cls.USER_PROJECT if domain_id: return cls.USER_DOMAIN if group_id: if project_id: return cls.GROUP_PROJECT if domain_id: return cls.GROUP_DOMAIN # Invalid parameters combination raise exception.AssignmentTypeCalculationError(**locals()) class Assignment(keystone_assignment.AssignmentDriverV9): def default_role_driver(self): return 'sql' def default_resource_driver(self): return 'sql' def create_grant(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): assignment_type = AssignmentType.calculate_type( user_id, group_id, project_id, domain_id) try: with sql.session_for_write() as session: session.add(RoleAssignment( type=assignment_type, actor_id=user_id or group_id, target_id=project_id or domain_id, role_id=role_id, inherited=inherited_to_projects)) except sql.DBDuplicateEntry: # nosec : The v3 grant APIs are silent if # the assignment already exists pass def list_grant_role_ids(self, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): with sql.session_for_read() as session: q = session.query(RoleAssignment.role_id) q = q.filter(RoleAssignment.actor_id == (user_id or 
group_id)) q = q.filter(RoleAssignment.target_id == (project_id or domain_id)) q = q.filter(RoleAssignment.inherited == inherited_to_projects) return [x.role_id for x in q.all()] def _build_grant_filter(self, session, role_id, user_id, group_id, domain_id, project_id, inherited_to_projects): q = session.query(RoleAssignment) q = q.filter_by(actor_id=user_id or group_id) q = q.filter_by(target_id=project_id or domain_id) q = q.filter_by(role_id=role_id) q = q.filter_by(inherited=inherited_to_projects) return q def check_grant_role_id(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): with sql.session_for_read() as session: try: q = self._build_grant_filter( session, role_id, user_id, group_id, domain_id, project_id, inherited_to_projects) q.one() except sql.NotFound: actor_id = user_id or group_id target_id = domain_id or project_id raise exception.RoleAssignmentNotFound(role_id=role_id, actor_id=actor_id, target_id=target_id) def delete_grant(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): with sql.session_for_write() as session: q = self._build_grant_filter( session, role_id, user_id, group_id, domain_id, project_id, inherited_to_projects) if not q.delete(False): actor_id = user_id or group_id target_id = domain_id or project_id raise exception.RoleAssignmentNotFound(role_id=role_id, actor_id=actor_id, target_id=target_id) def add_role_to_user_and_project(self, user_id, tenant_id, role_id): try: with sql.session_for_write() as session: session.add(RoleAssignment( type=AssignmentType.USER_PROJECT, actor_id=user_id, target_id=tenant_id, role_id=role_id, inherited=False)) except sql.DBDuplicateEntry: msg = ('User %s already has role %s in tenant %s' % (user_id, role_id, tenant_id)) raise exception.Conflict(type='role grant', details=msg) def remove_role_from_user_and_project(self, user_id, tenant_id, role_id): with sql.session_for_write() as session: q = 
session.query(RoleAssignment) q = q.filter_by(actor_id=user_id) q = q.filter_by(target_id=tenant_id) q = q.filter_by(role_id=role_id) if q.delete() == 0: raise exception.RoleNotFound(message=_( 'Cannot remove role that has not been granted, %s') % role_id) def _get_user_assignment_types(self): return [AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN] def _get_group_assignment_types(self): return [AssignmentType.GROUP_PROJECT, AssignmentType.GROUP_DOMAIN] def _get_project_assignment_types(self): return [AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT] def _get_domain_assignment_types(self): return [AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN] def _get_assignment_types(self, user, group, project, domain): """Returns a list of role assignment types based on provided entities If one of user or group (the "actor") as well as one of project or domain (the "target") are provided, the list will contain the role assignment type for that specific pair of actor and target. If only an actor or target is provided, the list will contain the role assignment types that satisfy the specified entity. For example, if user and project are provided, the return will be: [AssignmentType.USER_PROJECT] However, if only user was provided, the return would be: [AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN] It is not expected that user and group (or project and domain) are specified - but if they are, the most fine-grained value will be chosen (i.e. user over group, project over domain). 
""" actor_types = [] if user: actor_types = self._get_user_assignment_types() elif group: actor_types = self._get_group_assignment_types() target_types = [] if project: target_types = self._get_project_assignment_types() elif domain: target_types = self._get_domain_assignment_types() if actor_types and target_types: return list(set(actor_types).intersection(target_types)) return actor_types or target_types def list_role_assignments(self, role_id=None, user_id=None, group_ids=None, domain_id=None, project_ids=None, inherited_to_projects=None): def denormalize_role(ref): assignment = {} if ref.type == AssignmentType.USER_PROJECT: assignment['user_id'] = ref.actor_id assignment['project_id'] = ref.target_id elif ref.type == AssignmentType.USER_DOMAIN: assignment['user_id'] = ref.actor_id assignment['domain_id'] = ref.target_id elif ref.type == AssignmentType.GROUP_PROJECT: assignment['group_id'] = ref.actor_id assignment['project_id'] = ref.target_id elif ref.type == AssignmentType.GROUP_DOMAIN: assignment['group_id'] = ref.actor_id assignment['domain_id'] = ref.target_id else: raise exception.Error(message=_( 'Unexpected assignment type encountered, %s') % ref.type) assignment['role_id'] = ref.role_id if ref.inherited: assignment['inherited_to_projects'] = 'projects' return assignment with sql.session_for_read() as session: assignment_types = self._get_assignment_types( user_id, group_ids, project_ids, domain_id) targets = None if project_ids: targets = project_ids elif domain_id: targets = [domain_id] actors = None if group_ids: actors = group_ids elif user_id: actors = [user_id] query = session.query(RoleAssignment) if role_id: query = query.filter_by(role_id=role_id) if actors: query = query.filter(RoleAssignment.actor_id.in_(actors)) if targets: query = query.filter(RoleAssignment.target_id.in_(targets)) if assignment_types: query = query.filter(RoleAssignment.type.in_(assignment_types)) if inherited_to_projects is not None: query = 
query.filter_by(inherited=inherited_to_projects) return [denormalize_role(ref) for ref in query.all()] def delete_project_assignments(self, project_id): with sql.session_for_write() as session: q = session.query(RoleAssignment) q = q.filter_by(target_id=project_id).filter( RoleAssignment.type.in_((AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT)) ) q.delete(False) def delete_role_assignments(self, role_id): with sql.session_for_write() as session: q = session.query(RoleAssignment) q = q.filter_by(role_id=role_id) q.delete(False) def delete_domain_assignments(self, domain_id): with sql.session_for_write() as session: q = session.query(RoleAssignment) q = q.filter(RoleAssignment.target_id == domain_id).filter( (RoleAssignment.type == AssignmentType.USER_DOMAIN) | (RoleAssignment.type == AssignmentType.GROUP_DOMAIN)) q.delete(False) def delete_user_assignments(self, user_id): with sql.session_for_write() as session: q = session.query(RoleAssignment) q = q.filter_by(actor_id=user_id).filter( RoleAssignment.type.in_((AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN)) ) q.delete(False) def delete_group_assignments(self, group_id): with sql.session_for_write() as session: q = session.query(RoleAssignment) q = q.filter_by(actor_id=group_id).filter( RoleAssignment.type.in_((AssignmentType.GROUP_PROJECT, AssignmentType.GROUP_DOMAIN)) ) q.delete(False) class RoleAssignment(sql.ModelBase, sql.DictBase): __tablename__ = 'assignment' attributes = ['type', 'actor_id', 'target_id', 'role_id', 'inherited'] # NOTE(henry-nash): Postgres requires a name to be defined for an Enum type = sql.Column( sql.Enum(AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT, AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN, name='type'), nullable=False) actor_id = sql.Column(sql.String(64), nullable=False) target_id = sql.Column(sql.String(64), nullable=False) role_id = sql.Column(sql.String(64), nullable=False) inherited = sql.Column(sql.Boolean, default=False, 
nullable=False) __table_args__ = ( sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id', 'inherited'), sql.Index('ix_actor_id', 'actor_id'), ) def to_dict(self): """Override parent method with a simpler implementation. RoleAssignment doesn't have non-indexed 'extra' attributes, so the parent implementation is not applicable. """ return dict(self.items()) keystone-9.0.0/keystone/assignment/V8_role_backends/0000775000567000056710000000000012701407246023706 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/assignment/V8_role_backends/__init__.py0000664000567000056710000000000012701407102025774 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/assignment/V8_role_backends/sql.py0000664000567000056710000000560612701407102025055 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import assignment from keystone.common import sql from keystone import exception class Role(assignment.RoleDriverV8): @sql.handle_conflicts(conflict_type='role') def create_role(self, role_id, role): with sql.session_for_write() as session: ref = RoleTable.from_dict(role) session.add(ref) return ref.to_dict() @sql.truncated def list_roles(self, hints): with sql.session_for_read() as session: query = session.query(RoleTable) refs = sql.filter_limit_query(RoleTable, query, hints) return [ref.to_dict() for ref in refs] def list_roles_from_ids(self, ids): if not ids: return [] else: with sql.session_for_read() as session: query = session.query(RoleTable) query = query.filter(RoleTable.id.in_(ids)) role_refs = query.all() return [role_ref.to_dict() for role_ref in role_refs] def _get_role(self, session, role_id): ref = session.query(RoleTable).get(role_id) if ref is None: raise exception.RoleNotFound(role_id=role_id) return ref def get_role(self, role_id): with sql.session_for_read() as session: return self._get_role(session, role_id).to_dict() @sql.handle_conflicts(conflict_type='role') def update_role(self, role_id, role): with sql.session_for_write() as session: ref = self._get_role(session, role_id) old_dict = ref.to_dict() for k in role: old_dict[k] = role[k] new_role = RoleTable.from_dict(old_dict) for attr in RoleTable.attributes: if attr != 'id': setattr(ref, attr, getattr(new_role, attr)) ref.extra = new_role.extra return ref.to_dict() def delete_role(self, role_id): with sql.session_for_write() as session: ref = self._get_role(session, role_id) session.delete(ref) class RoleTable(sql.ModelBase, sql.DictBase): __tablename__ = 'role' attributes = ['id', 'name'] id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(255), unique=True, nullable=False) extra = sql.Column(sql.JsonBlob()) __table_args__ = (sql.UniqueConstraint('name'),) keystone-9.0.0/keystone/assignment/schema.py0000664000567000056710000000163412701407102022343 
0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common.validation import parameter_types _role_properties = { 'name': parameter_types.name } role_create = { 'type': 'object', 'properties': _role_properties, 'required': ['name'], 'additionalProperties': True } role_update = { 'type': 'object', 'properties': _role_properties, 'minProperties': 1, 'additionalProperties': True } keystone-9.0.0/keystone/assignment/__init__.py0000664000567000056710000000125512701407102022641 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.assignment import controllers # noqa from keystone.assignment.core import * # noqa keystone-9.0.0/keystone/assignment/role_backends/0000775000567000056710000000000012701407246023331 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/assignment/role_backends/__init__.py0000664000567000056710000000000012701407102025417 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/assignment/role_backends/sql.py0000664000567000056710000001747212701407102024504 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exception from keystone import assignment from keystone.common import driver_hints from keystone.common import sql from keystone import exception # NOTE(henry-nash): From the manager and above perspective, the domain_id # attribute of a role is nullable. However, to ensure uniqueness in # multi-process configurations, it is better to still use a sql uniqueness # constraint. Since the support for a nullable component of a uniqueness # constraint across different sql databases is mixed, we instead store a # special value to represent null, as defined in NULL_DOMAIN_ID below. 
NULL_DOMAIN_ID = '<>' class Role(assignment.RoleDriverV9): @sql.handle_conflicts(conflict_type='role') def create_role(self, role_id, role): with sql.session_for_write() as session: ref = RoleTable.from_dict(role) session.add(ref) return ref.to_dict() @driver_hints.truncated def list_roles(self, hints): # If there is a filter on domain_id and the value is None, then to # ensure that the sql filtering works correctly, we need to patch # the value to be NULL_DOMAIN_ID. This is safe to do here since we # know we are able to satisfy any filter of this type in the call to # filter_limit_query() below, which will remove the filter from the # hints (hence ensuring our substitution is not exposed to the caller). for f in hints.filters: if (f['name'] == 'domain_id' and f['value'] is None): f['value'] = NULL_DOMAIN_ID with sql.session_for_read() as session: query = session.query(RoleTable) refs = sql.filter_limit_query(RoleTable, query, hints) return [ref.to_dict() for ref in refs] def list_roles_from_ids(self, ids): if not ids: return [] else: with sql.session_for_read() as session: query = session.query(RoleTable) query = query.filter(RoleTable.id.in_(ids)) role_refs = query.all() return [role_ref.to_dict() for role_ref in role_refs] def _get_role(self, session, role_id): ref = session.query(RoleTable).get(role_id) if ref is None: raise exception.RoleNotFound(role_id=role_id) return ref def get_role(self, role_id): with sql.session_for_read() as session: return self._get_role(session, role_id).to_dict() @sql.handle_conflicts(conflict_type='role') def update_role(self, role_id, role): with sql.session_for_write() as session: ref = self._get_role(session, role_id) old_dict = ref.to_dict() for k in role: old_dict[k] = role[k] new_role = RoleTable.from_dict(old_dict) for attr in RoleTable.attributes: if attr != 'id': setattr(ref, attr, getattr(new_role, attr)) ref.extra = new_role.extra return ref.to_dict() def delete_role(self, role_id): with sql.session_for_write() as 
session: ref = self._get_role(session, role_id) session.delete(ref) def _get_implied_role(self, session, prior_role_id, implied_role_id): query = session.query( ImpliedRoleTable).filter( ImpliedRoleTable.prior_role_id == prior_role_id).filter( ImpliedRoleTable.implied_role_id == implied_role_id) try: ref = query.one() except sql.NotFound: raise exception.ImpliedRoleNotFound( prior_role_id=prior_role_id, implied_role_id=implied_role_id) return ref @sql.handle_conflicts(conflict_type='implied_role') def create_implied_role(self, prior_role_id, implied_role_id): with sql.session_for_write() as session: inference = {'prior_role_id': prior_role_id, 'implied_role_id': implied_role_id} ref = ImpliedRoleTable.from_dict(inference) try: session.add(ref) except db_exception.DBReferenceError: # We don't know which role threw this. # Query each to trigger the exception. self._get_role(session, prior_role_id) self._get_role(session, implied_role_id) return ref.to_dict() def delete_implied_role(self, prior_role_id, implied_role_id): with sql.session_for_write() as session: ref = self._get_implied_role(session, prior_role_id, implied_role_id) session.delete(ref) def list_implied_roles(self, prior_role_id): with sql.session_for_read() as session: query = session.query( ImpliedRoleTable).filter( ImpliedRoleTable.prior_role_id == prior_role_id) refs = query.all() return [ref.to_dict() for ref in refs] def list_role_inference_rules(self): with sql.session_for_read() as session: query = session.query(ImpliedRoleTable) refs = query.all() return [ref.to_dict() for ref in refs] def get_implied_role(self, prior_role_id, implied_role_id): with sql.session_for_read() as session: ref = self._get_implied_role(session, prior_role_id, implied_role_id) return ref.to_dict() class ImpliedRoleTable(sql.ModelBase, sql.DictBase): __tablename__ = 'implied_role' attributes = ['prior_role_id', 'implied_role_id'] prior_role_id = sql.Column( sql.String(64), sql.ForeignKey('role.id', ondelete="CASCADE"), 
primary_key=True) implied_role_id = sql.Column( sql.String(64), sql.ForeignKey('role.id', ondelete="CASCADE"), primary_key=True) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes. overrides the `to_dict` function from the base class to avoid having an `extra` field. """ d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class RoleTable(sql.ModelBase, sql.DictBase): def to_dict(self, include_extra_dict=False): d = super(RoleTable, self).to_dict( include_extra_dict=include_extra_dict) if d['domain_id'] == NULL_DOMAIN_ID: d['domain_id'] = None return d @classmethod def from_dict(cls, role_dict): if 'domain_id' in role_dict and role_dict['domain_id'] is None: new_dict = role_dict.copy() new_dict['domain_id'] = NULL_DOMAIN_ID else: new_dict = role_dict return super(RoleTable, cls).from_dict(new_dict) __tablename__ = 'role' attributes = ['id', 'name', 'domain_id'] id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(255), nullable=False) domain_id = sql.Column(sql.String(64), nullable=False, server_default=NULL_DOMAIN_ID) extra = sql.Column(sql.JsonBlob()) __table_args__ = (sql.UniqueConstraint('name', 'domain_id'),) keystone-9.0.0/keystone/assignment/core.py0000664000567000056710000023035312701407102022035 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Assignment service.""" import abc import copy from oslo_cache import core as oslo_cache from oslo_config import cfg from oslo_log import log from oslo_log import versionutils import six from keystone.common import cache from keystone.common import dependency from keystone.common import driver_hints from keystone.common import manager from keystone import exception from keystone.i18n import _ from keystone.i18n import _LI, _LE, _LW from keystone import notifications CONF = cfg.CONF LOG = log.getLogger(__name__) # This is a general cache region for assignment administration (CRUD # operations). MEMOIZE = cache.get_memoization_decorator(group='role') # This builds a discrete cache region dedicated to role assignments computed # for a given user + project/domain pair. Any write operation to add or remove # any role assignment should invalidate this entire cache region. COMPUTED_ASSIGNMENTS_REGION = oslo_cache.create_region() MEMOIZE_COMPUTED_ASSIGNMENTS = cache.get_memoization_decorator( group='role', region=COMPUTED_ASSIGNMENTS_REGION) @notifications.listener @dependency.provider('assignment_api') @dependency.requires('credential_api', 'identity_api', 'resource_api', 'revoke_api', 'role_api') class Manager(manager.Manager): """Default pivot point for the Assignment backend. See :class:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.assignment' _PROJECT = 'project' _ROLE_REMOVED_FROM_USER = 'role_removed_from_user' _INVALIDATION_USER_PROJECT_TOKENS = 'invalidate_user_project_tokens' def __init__(self): assignment_driver = CONF.assignment.driver # If there is no explicit assignment driver specified, we let the # identity driver tell us what to use. 
This is for backward # compatibility reasons from the time when identity, resource and # assignment were all part of identity. if assignment_driver is None: msg = _('Use of the identity driver config to automatically ' 'configure the same assignment driver has been ' 'deprecated, in the "O" release, the assignment driver ' 'will need to be expicitly configured if different ' 'than the default (SQL).') versionutils.report_deprecated_feature(LOG, msg) try: identity_driver = dependency.get_provider( 'identity_api').driver assignment_driver = identity_driver.default_assignment_driver() except ValueError: msg = _('Attempted automatic driver selection for assignment ' 'based upon [identity]\driver option failed since ' 'driver %s is not found. Set [assignment]/driver to ' 'a valid driver in keystone config.') LOG.critical(msg) raise exception.KeystoneConfigurationError(msg) super(Manager, self).__init__(assignment_driver) # Make sure it is a driver version we support, and if it is a legacy # driver, then wrap it. if isinstance(self.driver, AssignmentDriverV8): self.driver = V9AssignmentWrapperForV8Driver(self.driver) elif not isinstance(self.driver, AssignmentDriverV9): raise exception.UnsupportedDriverVersion(driver=assignment_driver) self.event_callbacks = { notifications.ACTIONS.deleted: { 'domain': [self._delete_domain_assignments], }, } def _delete_domain_assignments(self, service, resource_type, operations, payload): domain_id = payload['resource_info'] self.driver.delete_domain_assignments(domain_id) def _get_group_ids_for_user_id(self, user_id): # TODO(morganfainberg): Implement a way to get only group_ids # instead of the more expensive to_dict() call for each record. 
return [x['id'] for x in self.identity_api.list_groups_for_user(user_id)] def list_user_ids_for_project(self, tenant_id): self.resource_api.get_project(tenant_id) assignment_list = self.list_role_assignments( project_id=tenant_id, effective=True) # Use set() to process the list to remove any duplicates return list(set([x['user_id'] for x in assignment_list])) def _list_parent_ids_of_project(self, project_id): if CONF.os_inherit.enabled: return [x['id'] for x in ( self.resource_api.list_project_parents(project_id))] else: return [] @MEMOIZE_COMPUTED_ASSIGNMENTS def get_roles_for_user_and_project(self, user_id, tenant_id): """Get the roles associated with a user within given project. This includes roles directly assigned to the user on the project, as well as those by virtue of group membership or inheritance. :returns: a list of role ids. :raises keystone.exception.ProjectNotFound: If the project doesn't exist. """ self.resource_api.get_project(tenant_id) assignment_list = self.list_role_assignments( user_id=user_id, project_id=tenant_id, effective=True) # Use set() to process the list to remove any duplicates return list(set([x['role_id'] for x in assignment_list])) @MEMOIZE_COMPUTED_ASSIGNMENTS def get_roles_for_user_and_domain(self, user_id, domain_id): """Get the roles associated with a user within given domain. :returns: a list of role ids. :raises keystone.exception.DomainNotFound: If the domain doesn't exist. 
""" self.resource_api.get_domain(domain_id) assignment_list = self.list_role_assignments( user_id=user_id, domain_id=domain_id, effective=True) # Use set() to process the list to remove any duplicates return list(set([x['role_id'] for x in assignment_list])) def get_roles_for_groups(self, group_ids, project_id=None, domain_id=None): """Get a list of roles for this group on domain and/or project.""" if project_id is not None: self.resource_api.get_project(project_id) assignment_list = self.list_role_assignments( source_from_group_ids=group_ids, project_id=project_id, effective=True) elif domain_id is not None: assignment_list = self.list_role_assignments( source_from_group_ids=group_ids, domain_id=domain_id, effective=True) else: raise AttributeError(_("Must specify either domain or project")) role_ids = list(set([x['role_id'] for x in assignment_list])) return self.role_api.list_roles_from_ids(role_ids) def add_user_to_project(self, tenant_id, user_id): """Add user to a tenant by creating a default role relationship. :raises keystone.exception.ProjectNotFound: If the project doesn't exist. :raises keystone.exception.UserNotFound: If the user doesn't exist. 
""" self.resource_api.get_project(tenant_id) try: self.role_api.get_role(CONF.member_role_id) self.driver.add_role_to_user_and_project( user_id, tenant_id, CONF.member_role_id) except exception.RoleNotFound: LOG.info(_LI("Creating the default role %s " "because it does not exist."), CONF.member_role_id) role = {'id': CONF.member_role_id, 'name': CONF.member_role_name} try: self.role_api.create_role(CONF.member_role_id, role) except exception.Conflict: LOG.info(_LI("Creating the default role %s failed because it " "was already created"), CONF.member_role_id) # now that default role exists, the add should succeed self.driver.add_role_to_user_and_project( user_id, tenant_id, CONF.member_role_id) COMPUTED_ASSIGNMENTS_REGION.invalidate() @notifications.role_assignment('created') def _add_role_to_user_and_project_adapter(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, context=None): # The parameters for this method must match the parameters for # create_grant so that the notifications.role_assignment decorator # will work. self.resource_api.get_project(project_id) self.role_api.get_role(role_id) self.driver.add_role_to_user_and_project(user_id, project_id, role_id) def add_role_to_user_and_project(self, user_id, tenant_id, role_id): self._add_role_to_user_and_project_adapter( role_id, user_id=user_id, project_id=tenant_id) COMPUTED_ASSIGNMENTS_REGION.invalidate() def remove_user_from_project(self, tenant_id, user_id): """Remove user from a tenant :raises keystone.exception.ProjectNotFound: If the project doesn't exist. :raises keystone.exception.UserNotFound: If the user doesn't exist. 
""" roles = self.get_roles_for_user_and_project(user_id, tenant_id) if not roles: raise exception.NotFound(tenant_id) for role_id in roles: try: self.driver.remove_role_from_user_and_project(user_id, tenant_id, role_id) self.revoke_api.revoke_by_grant(role_id, user_id=user_id, project_id=tenant_id) except exception.RoleNotFound: LOG.debug("Removing role %s failed because it does not exist.", role_id) COMPUTED_ASSIGNMENTS_REGION.invalidate() # TODO(henry-nash): We might want to consider list limiting this at some # point in the future. def list_projects_for_user(self, user_id, hints=None): assignment_list = self.list_role_assignments( user_id=user_id, effective=True) # Use set() to process the list to remove any duplicates project_ids = list(set([x['project_id'] for x in assignment_list if x.get('project_id')])) return self.resource_api.list_projects_from_ids(list(project_ids)) # TODO(henry-nash): We might want to consider list limiting this at some # point in the future. def list_domains_for_user(self, user_id, hints=None): assignment_list = self.list_role_assignments( user_id=user_id, effective=True) # Use set() to process the list to remove any duplicates domain_ids = list(set([x['domain_id'] for x in assignment_list if x.get('domain_id')])) return self.resource_api.list_domains_from_ids(domain_ids) def list_domains_for_groups(self, group_ids): assignment_list = self.list_role_assignments( source_from_group_ids=group_ids, effective=True) domain_ids = list(set([x['domain_id'] for x in assignment_list if x.get('domain_id')])) return self.resource_api.list_domains_from_ids(domain_ids) def list_projects_for_groups(self, group_ids): assignment_list = self.list_role_assignments( source_from_group_ids=group_ids, effective=True) project_ids = list(set([x['project_id'] for x in assignment_list if x.get('project_id')])) return self.resource_api.list_projects_from_ids(project_ids) @notifications.role_assignment('deleted') def _remove_role_from_user_and_project_adapter(self, 
role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, context=None): # The parameters for this method must match the parameters for # delete_grant so that the notifications.role_assignment decorator # will work. self.driver.remove_role_from_user_and_project(user_id, project_id, role_id) if project_id: self._emit_invalidate_grant_token_persistence(user_id, project_id) else: self.identity_api.emit_invalidate_user_token_persistence(user_id) self.revoke_api.revoke_by_grant(role_id, user_id=user_id, project_id=project_id) def remove_role_from_user_and_project(self, user_id, tenant_id, role_id): self._remove_role_from_user_and_project_adapter( role_id, user_id=user_id, project_id=tenant_id) COMPUTED_ASSIGNMENTS_REGION.invalidate() def _emit_invalidate_user_token_persistence(self, user_id): self.identity_api.emit_invalidate_user_token_persistence(user_id) # NOTE(lbragstad): The previous notification decorator behavior didn't # send the notification unless the operation was successful. We # maintain that behavior here by calling to the notification module # after the call to emit invalid user tokens. 
notifications.Audit.internal( notifications.INVALIDATE_USER_TOKEN_PERSISTENCE, user_id ) def _emit_invalidate_grant_token_persistence(self, user_id, project_id): self.identity_api.emit_invalidate_grant_token_persistence( {'user_id': user_id, 'project_id': project_id} ) @notifications.role_assignment('created') def create_grant(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, context=None): self.role_api.get_role(role_id) if domain_id: self.resource_api.get_domain(domain_id) if project_id: self.resource_api.get_project(project_id) self.driver.create_grant(role_id, user_id, group_id, domain_id, project_id, inherited_to_projects) COMPUTED_ASSIGNMENTS_REGION.invalidate() def get_grant(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): role_ref = self.role_api.get_role(role_id) if domain_id: self.resource_api.get_domain(domain_id) if project_id: self.resource_api.get_project(project_id) self.check_grant_role_id( role_id, user_id, group_id, domain_id, project_id, inherited_to_projects) return role_ref def list_grants(self, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): if domain_id: self.resource_api.get_domain(domain_id) if project_id: self.resource_api.get_project(project_id) grant_ids = self.list_grant_role_ids( user_id, group_id, domain_id, project_id, inherited_to_projects) return self.role_api.list_roles_from_ids(grant_ids) @notifications.role_assignment('deleted') def _emit_revoke_user_grant(self, role_id, user_id, domain_id, project_id, inherited_to_projects, context): self._emit_invalidate_grant_token_persistence(user_id, project_id) def delete_grant(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, context=None): if group_id is None: self.revoke_api.revoke_by_grant(user_id=user_id, role_id=role_id, domain_id=domain_id, project_id=project_id) 
self._emit_revoke_user_grant( role_id, user_id, domain_id, project_id, inherited_to_projects, context) else: try: # Group may contain a lot of users so revocation will be # by role & domain/project if domain_id is None: self.revoke_api.revoke_by_project_role_assignment( project_id, role_id ) else: self.revoke_api.revoke_by_domain_role_assignment( domain_id, role_id ) if CONF.token.revoke_by_id: # NOTE(morganfainberg): The user ids are the important part # for invalidating tokens below, so extract them here. for user in self.identity_api.list_users_in_group( group_id): self._emit_revoke_user_grant( role_id, user['id'], domain_id, project_id, inherited_to_projects, context) except exception.GroupNotFound: LOG.debug('Group %s not found, no tokens to invalidate.', group_id) # TODO(henry-nash): While having the call to get_role here mimics the # previous behavior (when it was buried inside the driver delete call), # this seems an odd place to have this check, given what we have # already done so far in this method. See Bug #1406776. self.role_api.get_role(role_id) if domain_id: self.resource_api.get_domain(domain_id) if project_id: self.resource_api.get_project(project_id) self.driver.delete_grant(role_id, user_id, group_id, domain_id, project_id, inherited_to_projects) COMPUTED_ASSIGNMENTS_REGION.invalidate() # The methods _expand_indirect_assignment, _list_direct_role_assignments # and _list_effective_role_assignments below are only used on # list_role_assignments, but they are not in its scope as nested functions # since it would significantly increase McCabe complexity, that should be # kept as it is in order to detect unnecessarily complex code, which is not # this case. def _expand_indirect_assignment(self, ref, user_id=None, project_id=None, subtree_ids=None, expand_groups=True): """Returns a list of expanded role assignments. 
This methods is called for each discovered assignment that either needs a group assignment expanded into individual user assignments, or needs an inherited assignment to be applied to its children. In all cases, if either user_id and/or project_id is specified, then we filter the result on those values. If project_id is specified and subtree_ids is None, then this indicates that we are only interested in that one project. If subtree_ids is not None, then this is an indicator that any inherited assignments need to be expanded down the tree. The actual subtree_ids don't need to be used as a filter here, since we already ensured only those assignments that could affect them were passed to this method. If expand_groups is True then we expand groups out to a list of assignments, one for each member of that group. """ def create_group_assignment(base_ref, user_id): """Creates a group assignment from the provided ref.""" ref = copy.deepcopy(base_ref) ref['user_id'] = user_id indirect = ref.setdefault('indirect', {}) indirect['group_id'] = ref.pop('group_id') return ref def expand_group_assignment(ref, user_id): """Expands group role assignment. For any group role assignment on a target, it is replaced by a list of role assignments containing one for each user of that group on that target. An example of accepted ref is:: { 'group_id': group_id, 'project_id': project_id, 'role_id': role_id } Once expanded, it should be returned as a list of entities like the one below, one for each each user_id in the provided group_id. :: { 'user_id': user_id, 'project_id': project_id, 'role_id': role_id, 'indirect' : { 'group_id': group_id } } Returned list will be formatted by the Controller, which will deduce a role assignment came from group membership if it has both 'user_id' in the main body of the dict and 'group_id' in indirect subdict. 
""" if user_id: return [create_group_assignment(ref, user_id=user_id)] return [create_group_assignment(ref, user_id=m['id']) for m in self.identity_api.list_users_in_group( ref['group_id'])] def expand_inherited_assignment(ref, user_id, project_id, subtree_ids, expand_groups): """Expands inherited role assignments. If expand_groups is True and this is a group role assignment on a target, replace it by a list of role assignments containing one for each user of that group, on every project under that target. If expand_groups is False, then return a group assignment on an inherited target. If this is a user role assignment on a specific target (i.e. project_id is specified, but subtree_ids is None) then simply format this as a single assignment (since we are effectively filtering on project_id). If however, project_id is None or subtree_ids is not None, then replace this one assignment with a list of role assignments for that user on every project under that target. An example of accepted ref is:: { 'group_id': group_id, 'project_id': parent_id, 'role_id': role_id, 'inherited_to_projects': 'projects' } Once expanded, it should be returned as a list of entities like the one below, one for each each user_id in the provided group_id and for each subproject_id in the project_id subtree. :: { 'user_id': user_id, 'project_id': subproject_id, 'role_id': role_id, 'indirect' : { 'group_id': group_id, 'project_id': parent_id } } Returned list will be formatted by the Controller, which will deduce a role assignment came from group membership if it has both 'user_id' in the main body of the dict and 'group_id' in the 'indirect' subdict, as well as it is possible to deduce if it has come from inheritance if it contains both a 'project_id' in the main body of the dict and 'parent_id' in the 'indirect' subdict. """ def create_inherited_assignment(base_ref, project_id): """Creates a project assignment from the provided ref. 
base_ref can either be a project or domain inherited assignment ref. """ ref = copy.deepcopy(base_ref) indirect = ref.setdefault('indirect', {}) if ref.get('project_id'): indirect['project_id'] = ref.pop('project_id') else: indirect['domain_id'] = ref.pop('domain_id') ref['project_id'] = project_id ref.pop('inherited_to_projects') return ref # Define expanded project list to which to apply this assignment if project_id: # Since ref is an inherited assignment and we are filtering by # project(s), we are only going to apply the assignment to the # relevant project(s) project_ids = [project_id] if subtree_ids: project_ids += subtree_ids # If this is a domain inherited assignment, then we know # that all the project_ids will get this assignment. If # it's a project inherited assignment, and the assignment # point is an ancestor of project_id, then we know that # again all the project_ids will get the assignment. If, # however, the assignment point is within the subtree, # then only a partial tree will get the assignment. 
if ref.get('project_id'): if ref['project_id'] in project_ids: project_ids = ( [x['id'] for x in self.resource_api.list_projects_in_subtree( ref['project_id'])]) elif ref.get('domain_id'): # A domain inherited assignment, so apply it to all projects # in this domain project_ids = ( [x['id'] for x in self.resource_api.list_projects_in_domain( ref['domain_id'])]) else: # It must be a project assignment, so apply it to its subtree project_ids = ( [x['id'] for x in self.resource_api.list_projects_in_subtree( ref['project_id'])]) new_refs = [] if 'group_id' in ref: if expand_groups: # Expand role assignment to all group members on any # inherited target of any of the projects for ref in expand_group_assignment(ref, user_id): new_refs += [create_inherited_assignment(ref, proj_id) for proj_id in project_ids] else: # Just place the group assignment on any inherited target # of any of the projects new_refs += [create_inherited_assignment(ref, proj_id) for proj_id in project_ids] else: # Expand role assignment for all projects new_refs += [create_inherited_assignment(ref, proj_id) for proj_id in project_ids] return new_refs if ref.get('inherited_to_projects') == 'projects': return expand_inherited_assignment( ref, user_id, project_id, subtree_ids, expand_groups) elif 'group_id' in ref and expand_groups: return expand_group_assignment(ref, user_id) return [ref] def add_implied_roles(self, role_refs): """Expand out implied roles. The role_refs passed in have had all inheritance and group assignments expanded out. We now need to look at the role_id in each ref and see if it is a prior role for some implied roles. If it is, then we need to duplicate that ref, one for each implied role. We store the prior role in the indirect dict that is part of such a duplicated ref, so that a caller can determine where the assignment came from. 
""" def _make_implied_ref_copy(prior_ref, implied_role_id): # Create a ref for an implied role from the ref of a prior role, # setting the new role_id to be the implied role and the indirect # role_id to be the prior role implied_ref = copy.deepcopy(prior_ref) implied_ref['role_id'] = implied_role_id indirect = implied_ref.setdefault('indirect', {}) indirect['role_id'] = prior_ref['role_id'] return implied_ref if not CONF.token.infer_roles: return role_refs try: implied_roles_cache = {} role_refs_to_check = list(role_refs) ref_results = list(role_refs) checked_role_refs = list() while(role_refs_to_check): next_ref = role_refs_to_check.pop() checked_role_refs.append(next_ref) next_role_id = next_ref['role_id'] if next_role_id in implied_roles_cache: implied_roles = implied_roles_cache[next_role_id] else: implied_roles = ( self.role_api.list_implied_roles(next_role_id)) implied_roles_cache[next_role_id] = implied_roles for implied_role in implied_roles: implied_ref = ( _make_implied_ref_copy( next_ref, implied_role['implied_role_id'])) if implied_ref in checked_role_refs: msg = _LE('Circular reference found ' 'role inference rules - %(prior_role_id)s.') LOG.error(msg, {'prior_role_id': next_ref['role_id']}) else: ref_results.append(implied_ref) role_refs_to_check.append(implied_ref) except exception.NotImplemented: LOG.error('Role driver does not support implied roles.') return ref_results def _filter_by_role_id(self, role_id, ref_results): # if we arrive here, we need to filer by role_id. filter_results = [] for ref in ref_results: if ref['role_id'] == role_id: filter_results.append(ref) return filter_results def _strip_domain_roles(self, role_refs): """Post process assignment list for domain roles. Domain roles are only designed to do the job of inferring other roles and since that has been done before this method is called, we need to remove any assignments that include a domain role. 
""" def _role_is_global(role_id): ref = self.role_api.get_role(role_id) return (ref['domain_id'] is None) filter_results = [] for ref in role_refs: if _role_is_global(ref['role_id']): filter_results.append(ref) return filter_results def _list_effective_role_assignments(self, role_id, user_id, group_id, domain_id, project_id, subtree_ids, inherited, source_from_group_ids, strip_domain_roles): """List role assignments in effective mode. When using effective mode, besides the direct assignments, the indirect ones that come from grouping or inheritance are retrieved and will then be expanded. The resulting list of assignments will be filtered by the provided parameters. If subtree_ids is not None, then we also want to include all subtree_ids in the filter as well. Since we are in effective mode, group can never act as a filter (since group assignments are expanded into user roles) and domain can only be filter if we want non-inherited assignments, since domains can't inherit assignments. The goal of this method is to only ask the driver for those assignments as could effect the result based on the parameter filters specified, hence avoiding retrieving a huge list. """ def list_role_assignments_for_actor( role_id, inherited, user_id=None, group_ids=None, project_id=None, subtree_ids=None, domain_id=None): """List role assignments for actor on target. List direct and indirect assignments for an actor, optionally for a given target (i.e. projects or domain). :param role_id: List for a specific role, can be None meaning all roles :param inherited: Indicates whether inherited assignments or only direct assignments are required. If None, then both are required. :param user_id: If not None, list only assignments that affect this user. :param group_ids: A list of groups required. 
Only one of user_id and group_ids can be specified :param project_id: If specified, only include those assignments that affect at least this project, with additionally any projects specified in subtree_ids :param subtree_ids: The list of projects in the subtree. If specified, also include those assignments that affect these projects. These projects are guaranteed to be in the same domain as the project specified in project_id. subtree_ids can only be specified if project_id has also been specified. :param domain_id: If specified, only include those assignments that affect this domain - by definition this will not include any inherited assignments :returns: List of assignments matching the criteria. Any inherited or group assignments that could affect the resulting response are included. """ project_ids_of_interest = None if project_id: if subtree_ids: project_ids_of_interest = subtree_ids + [project_id] else: project_ids_of_interest = [project_id] # List direct project role assignments non_inherited_refs = [] if inherited is False or inherited is None: # Get non inherited assignments non_inherited_refs = self.driver.list_role_assignments( role_id=role_id, domain_id=domain_id, project_ids=project_ids_of_interest, user_id=user_id, group_ids=group_ids, inherited_to_projects=False) inherited_refs = [] if inherited is True or inherited is None: # Get inherited assignments if project_id: # The project and any subtree are guaranteed to be owned by # the same domain, so since we are filtering by these # specific projects, then we can only get inherited # assignments from their common domain or from any of # their parents projects. 
# List inherited assignments from the project's domain proj_domain_id = self.resource_api.get_project( project_id)['domain_id'] inherited_refs += self.driver.list_role_assignments( role_id=role_id, domain_id=proj_domain_id, user_id=user_id, group_ids=group_ids, inherited_to_projects=True) # For inherited assignments from projects, since we know # they are from the same tree the only places these can # come from are from parents of the main project or # inherited assignments on the project or subtree itself. source_ids = [project['id'] for project in self.resource_api.list_project_parents( project_id)] if subtree_ids: source_ids += project_ids_of_interest if source_ids: inherited_refs += self.driver.list_role_assignments( role_id=role_id, project_ids=source_ids, user_id=user_id, group_ids=group_ids, inherited_to_projects=True) else: # List inherited assignments without filtering by target inherited_refs = self.driver.list_role_assignments( role_id=role_id, user_id=user_id, group_ids=group_ids, inherited_to_projects=True) return non_inherited_refs + inherited_refs # If filtering by group or inherited domain assignment the list is # guaranteed to be empty if group_id or (domain_id and inherited): return [] if user_id and source_from_group_ids: # You can't do both - and since source_from_group_ids is only used # internally, this must be a coding error by the caller. msg = _('Cannot list assignments sourced from groups and filtered ' 'by user ID.') raise exception.UnexpectedError(msg) # If filtering by domain, then only non-inherited assignments are # relevant, since domains don't inherit assignments inherited = False if domain_id else inherited # List user or explicit group assignments. # Due to the need to expand implied roles, this call will skip # filtering by role_id and instead return the whole set of roles. # Matching on the specified role is performed at the end. 
direct_refs = list_role_assignments_for_actor( role_id=None, user_id=user_id, group_ids=source_from_group_ids, project_id=project_id, subtree_ids=subtree_ids, domain_id=domain_id, inherited=inherited) # And those from the user's groups, so long as we are not restricting # to a set of source groups (in which case we already got those # assignments in the direct listing above). group_refs = [] if not source_from_group_ids and user_id: group_ids = self._get_group_ids_for_user_id(user_id) if group_ids: group_refs = list_role_assignments_for_actor( role_id=None, project_id=project_id, subtree_ids=subtree_ids, group_ids=group_ids, domain_id=domain_id, inherited=inherited) # Expand grouping and inheritance on retrieved role assignments refs = [] expand_groups = (source_from_group_ids is None) for ref in (direct_refs + group_refs): refs += self._expand_indirect_assignment( ref, user_id, project_id, subtree_ids, expand_groups) refs = self.add_implied_roles(refs) if strip_domain_roles: refs = self._strip_domain_roles(refs) if role_id: refs = self._filter_by_role_id(role_id, refs) return refs def _list_direct_role_assignments(self, role_id, user_id, group_id, domain_id, project_id, subtree_ids, inherited): """List role assignments without applying expansion. Returns a list of direct role assignments, where their attributes match the provided filters. If subtree_ids is not None, then we also want to include all subtree_ids in the filter as well. 
""" group_ids = [group_id] if group_id else None project_ids_of_interest = None if project_id: if subtree_ids: project_ids_of_interest = subtree_ids + [project_id] else: project_ids_of_interest = [project_id] return self.driver.list_role_assignments( role_id=role_id, user_id=user_id, group_ids=group_ids, domain_id=domain_id, project_ids=project_ids_of_interest, inherited_to_projects=inherited) def list_role_assignments(self, role_id=None, user_id=None, group_id=None, domain_id=None, project_id=None, include_subtree=False, inherited=None, effective=None, include_names=False, source_from_group_ids=None, strip_domain_roles=True): """List role assignments, honoring effective mode and provided filters. Returns a list of role assignments, where their attributes match the provided filters (role_id, user_id, group_id, domain_id, project_id and inherited). If include_subtree is True, then assignments on all descendants of the project specified by project_id are also included. The inherited filter defaults to None, meaning to get both non-inherited and inherited role assignments. If effective mode is specified, this means that rather than simply return the assignments that match the filters, any group or inheritance assignments will be expanded. Group assignments will become assignments for all the users in that group, and inherited assignments will be shown on the projects below the assignment point. Think of effective mode as being the list of assignments that actually affect a user, for example the roles that would be placed in a token. If include_names is set to true the entities' names are returned in addition to their id's. source_from_group_ids is a list of group IDs and, if specified, then only those assignments that are derived from membership of these groups are considered, and any such assignments will not be expanded into their user membership assignments. 
This is different to a group filter of the resulting list, instead being a restriction on which assignments should be considered before expansion of inheritance. This option is only used internally (i.e. it is not exposed at the API level) and is only supported in effective mode (since in regular mode there is no difference between this and a group filter, other than it is a list of groups). In effective mode, any domain specific roles are usually stripped from the returned assignments (since such roles are not placed in tokens). This stripping can be disabled by specifying strip_domain_roles=False, which is useful for internal calls like trusts which need to examine the full set of roles. If OS-INHERIT extension is disabled or the used driver does not support inherited roles retrieval, inherited role assignments will be ignored. """ if not CONF.os_inherit.enabled: if inherited: return [] inherited = False subtree_ids = None if project_id and include_subtree: subtree_ids = ( [x['id'] for x in self.resource_api.list_projects_in_subtree(project_id)]) if effective: role_assignments = self._list_effective_role_assignments( role_id, user_id, group_id, domain_id, project_id, subtree_ids, inherited, source_from_group_ids, strip_domain_roles) else: role_assignments = self._list_direct_role_assignments( role_id, user_id, group_id, domain_id, project_id, subtree_ids, inherited) if include_names: return self._get_names_from_role_assignments(role_assignments) return role_assignments def _get_names_from_role_assignments(self, role_assignments): role_assign_list = [] for role_asgmt in role_assignments: new_assign = {} for id_type, id_ in role_asgmt.items(): if id_type == 'domain_id': _domain = self.resource_api.get_domain(id_) new_assign['domain_id'] = _domain['id'] new_assign['domain_name'] = _domain['name'] elif id_type == 'user_id': _user = self.identity_api.get_user(id_) new_assign['user_id'] = _user['id'] new_assign['user_name'] = _user['name'] new_assign['user_domain_id'] 
= _user['domain_id'] new_assign['user_domain_name'] = ( self.resource_api.get_domain(_user['domain_id']) ['name']) elif id_type == 'group_id': _group = self.identity_api.get_group(id_) new_assign['group_id'] = _group['id'] new_assign['group_name'] = _group['name'] new_assign['group_domain_id'] = _group['domain_id'] new_assign['group_domain_name'] = ( self.resource_api.get_domain(_group['domain_id']) ['name']) elif id_type == 'project_id': _project = self.resource_api.get_project(id_) new_assign['project_id'] = _project['id'] new_assign['project_name'] = _project['name'] new_assign['project_domain_id'] = _project['domain_id'] new_assign['project_domain_name'] = ( self.resource_api.get_domain(_project['domain_id']) ['name']) elif id_type == 'role_id': _role = self.role_api.get_role(id_) new_assign['role_id'] = _role['id'] new_assign['role_name'] = _role['name'] role_assign_list.append(new_assign) return role_assign_list def delete_tokens_for_role_assignments(self, role_id): assignments = self.list_role_assignments(role_id=role_id) # Iterate over the assignments for this role and build the list of # user or user+project IDs for the tokens we need to delete user_ids = set() user_and_project_ids = list() for assignment in assignments: # If we have a project assignment, then record both the user and # project IDs so we can target the right token to delete. If it is # a domain assignment, we might as well kill all the tokens for # the user, since in the vast majority of cases all the tokens # for a user will be within one domain anyway, so not worth # trying to delete tokens for each project in the domain. if 'user_id' in assignment: if 'project_id' in assignment: user_and_project_ids.append( (assignment['user_id'], assignment['project_id'])) elif 'domain_id' in assignment: self._emit_invalidate_user_token_persistence( assignment['user_id']) elif 'group_id' in assignment: # Add in any users for this group, being tolerant of any # cross-driver database integrity errors. 
try: users = self.identity_api.list_users_in_group( assignment['group_id']) except exception.GroupNotFound: # Ignore it, but log a debug message if 'project_id' in assignment: target = _('Project (%s)') % assignment['project_id'] elif 'domain_id' in assignment: target = _('Domain (%s)') % assignment['domain_id'] else: target = _('Unknown Target') msg = ('Group (%(group)s), referenced in assignment ' 'for %(target)s, not found - ignoring.') LOG.debug(msg, {'group': assignment['group_id'], 'target': target}) continue if 'project_id' in assignment: for user in users: user_and_project_ids.append( (user['id'], assignment['project_id'])) elif 'domain_id' in assignment: for user in users: self._emit_invalidate_user_token_persistence( user['id']) # Now process the built up lists. Before issuing calls to delete any # tokens, let's try and minimize the number of calls by pruning out # any user+project deletions where a general token deletion for that # same user is also planned. user_and_project_ids_to_action = [] for user_and_project_id in user_and_project_ids: if user_and_project_id[0] not in user_ids: user_and_project_ids_to_action.append(user_and_project_id) for user_id, project_id in user_and_project_ids_to_action: payload = {'user_id': user_id, 'project_id': project_id} notifications.Audit.internal( notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE, payload ) # The AssignmentDriverBase class is the set of driver methods from earlier # drivers that we still support, that have not been removed or modified. This # class is then used to created the augmented V8 and V9 version abstract driver # classes, without having to duplicate a lot of abstract method signatures. # If you remove a method from V9, then move the abstract methods from this Base # class to the V8 class. Do not modify any of the method signatures in the Base # class - changes should only be made in the V8 and subsequent classes. 
@six.add_metaclass(abc.ABCMeta)
class AssignmentDriverBase(object):
    """Methods common to all versions of the assignment driver interface."""

    def _get_list_limit(self):
        # Per-backend list limit, falling back to the global default.
        return CONF.assignment.list_limit or CONF.list_limit

    @abc.abstractmethod
    def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
        """Add a role to a user within given tenant.

        :raises keystone.exception.Conflict: If a duplicate role assignment
            exists.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
        """Remove a role from a user within given tenant.

        :raises keystone.exception.RoleNotFound: If the role doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    # assignment/grant crud

    @abc.abstractmethod
    def create_grant(self, role_id, user_id=None, group_id=None,
                     domain_id=None, project_id=None,
                     inherited_to_projects=False):
        """Create a new assignment/grant.

        If the assignment is to a domain, then optionally it may be
        specified as inherited to owned projects (this requires
        the OS-INHERIT extension to be enabled).

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_grant_role_ids(self, user_id=None, group_id=None,
                            domain_id=None, project_id=None,
                            inherited_to_projects=False):
        """List role ids for assignments/grants."""
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def check_grant_role_id(self, role_id, user_id=None, group_id=None,
                            domain_id=None, project_id=None,
                            inherited_to_projects=False):
        """Check an assignment/grant role id.

        :raises keystone.exception.RoleAssignmentNotFound: If the role
            assignment doesn't exist.
        :returns: None or raises an exception if grant not found

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_grant(self, role_id, user_id=None, group_id=None,
                     domain_id=None, project_id=None,
                     inherited_to_projects=False):
        """Delete assignments/grants.

        :raises keystone.exception.RoleAssignmentNotFound: If the role
            assignment doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_role_assignments(self, role_id=None,
                              user_id=None, group_ids=None,
                              domain_id=None, project_ids=None,
                              inherited_to_projects=None):
        """Return a list of role assignments for actors on targets.

        Available parameters represent values in which the returned role
        assignments attributes need to be filtered on.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_project_assignments(self, project_id):
        """Delete all assignments for a project.

        :raises keystone.exception.ProjectNotFound: If the project doesn't
            exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_role_assignments(self, role_id):
        """Delete all assignments for a role."""
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_user_assignments(self, user_id):
        """Delete all assignments for a user.

        :raises keystone.exception.RoleNotFound: If the role doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_group_assignments(self, group_id):
        """Delete all assignments for a group.

        :raises keystone.exception.RoleNotFound: If the role doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover


class AssignmentDriverV8(AssignmentDriverBase):
    """Removed or redefined methods from V8.

    Move the abstract methods of any methods removed or modified in later
    versions of the driver from AssignmentDriverBase to here. We maintain this
    so that legacy drivers, which will be a subclass of AssignmentDriverV8,
    can still reference them.

    """

    @abc.abstractmethod
    def list_user_ids_for_project(self, tenant_id):
        """List all user IDs with a role assignment in the specified project.

        :returns: a list of user_ids or an empty set.

""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_project_ids_for_user(self, user_id, group_ids, hints, inherited=False): """List all project ids associated with a given user. :param user_id: the user in question :param group_ids: the groups this user is a member of. This list is built in the Manager, so that the driver itself does not have to call across to identity. :param hints: filter hints which the driver should implement if at all possible. :param inherited: whether assignments marked as inherited should be included. :returns: a list of project ids or an empty list. This method should not try and expand any inherited assignments, just report the projects that have the role for this user. The manager method is responsible for expanding out inherited assignments. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_domain_ids_for_user(self, user_id, group_ids, hints, inherited=False): """List all domain ids associated with a given user. :param user_id: the user in question :param group_ids: the groups this user is a member of. This list is built in the Manager, so that the driver itself does not have to call across to identity. :param hints: filter hints which the driver should implement if at all possible. :param inherited: whether to return domain_ids that have inherited assignments or not. :returns: a list of domain ids or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_project_ids_for_groups(self, group_ids, hints, inherited=False): """List project ids accessible to specified groups. :param group_ids: List of group ids. :param hints: filter hints which the driver should implement if at all possible. :param inherited: whether assignments marked as inherited should be included. :returns: List of project ids accessible to specified groups. 
This method should not try and expand any inherited assignments, just report the projects that have the role for this group. The manager method is responsible for expanding out inherited assignments. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_domain_ids_for_groups(self, group_ids, inherited=False): """List domain ids accessible to specified groups. :param group_ids: List of group ids. :param inherited: whether to return domain_ids that have inherited assignments or not. :returns: List of domain ids accessible to specified groups. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_role_ids_for_groups_on_project( self, group_ids, project_id, project_domain_id, project_parents): """List the group role ids for a specific project. Supports the ``OS-INHERIT`` role inheritance from the project's domain if supported by the assignment driver. :param group_ids: list of group ids :type group_ids: list :param project_id: project identifier :type project_id: str :param project_domain_id: project's domain identifier :type project_domain_id: str :param project_parents: list of parent ids of this project :type project_parents: list :returns: list of role ids for the project :rtype: list """ raise exception.NotImplemented() @abc.abstractmethod def list_role_ids_for_groups_on_domain(self, group_ids, domain_id): """List the group role ids for a specific domain. :param group_ids: list of group ids :type group_ids: list :param domain_id: domain identifier :type domain_id: str :returns: list of role ids for the project :rtype: list """ raise exception.NotImplemented() class AssignmentDriverV9(AssignmentDriverBase): """New or redefined methods from V8. Add any new V9 abstract methods (or those with modified signatures) to this class. 
""" @abc.abstractmethod def delete_domain_assignments(self, domain_id): """Deletes all assignments for a domain.""" raise exception.NotImplemented() class V9AssignmentWrapperForV8Driver(AssignmentDriverV9): """Wrapper class to supported a V8 legacy driver. In order to support legacy drivers without having to make the manager code driver-version aware, we wrap legacy drivers so that they look like the latest version. For the various changes made in a new driver, here are the actions needed in this wrapper: Method removed from new driver - remove the call-through method from this class, since the manager will no longer be calling it. Method signature (or meaning) changed - wrap the old method in a new signature here, and munge the input and output parameters accordingly. New method added to new driver - add a method to implement the new functionality here if possible. If that is not possible, then return NotImplemented, since we do not guarantee to support new functionality with legacy drivers. """ @versionutils.deprecated( as_of=versionutils.deprecated.MITAKA, what='keystone.assignment.AssignmentDriverV8', in_favor_of='keystone.assignment.AssignmentDriverV9', remove_in=+2) def __init__(self, wrapped_driver): self.driver = wrapped_driver def delete_domain_assignments(self, domain_id): """Deletes all assignments for a domain.""" msg = _LW('delete_domain_assignments method not found in custom ' 'assignment driver. Domain assignments for domain (%s) to ' 'users from other domains will not be removed. 
This was ' 'added in V9 of the assignment driver.') LOG.warning(msg, domain_id) def default_role_driver(self): return self.driver.default_role_driver() def default_resource_driver(self): return self.driver.default_resource_driver() def add_role_to_user_and_project(self, user_id, tenant_id, role_id): self.driver.add_role_to_user_and_project(user_id, tenant_id, role_id) def remove_role_from_user_and_project(self, user_id, tenant_id, role_id): self.driver.remove_role_from_user_and_project( user_id, tenant_id, role_id) def create_grant(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): self.driver.create_grant( role_id, user_id=user_id, group_id=group_id, domain_id=domain_id, project_id=project_id, inherited_to_projects=inherited_to_projects) def list_grant_role_ids(self, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): return self.driver.list_grant_role_ids( user_id=user_id, group_id=group_id, domain_id=domain_id, project_id=project_id, inherited_to_projects=inherited_to_projects) def check_grant_role_id(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): self.driver.check_grant_role_id( role_id, user_id=user_id, group_id=group_id, domain_id=domain_id, project_id=project_id, inherited_to_projects=inherited_to_projects) def delete_grant(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): self.driver.delete_grant( role_id, user_id=user_id, group_id=group_id, domain_id=domain_id, project_id=project_id, inherited_to_projects=inherited_to_projects) def list_role_assignments(self, role_id=None, user_id=None, group_ids=None, domain_id=None, project_ids=None, inherited_to_projects=None): return self.driver.list_role_assignments( role_id=role_id, user_id=user_id, group_ids=group_ids, domain_id=domain_id, project_ids=project_ids, 
inherited_to_projects=inherited_to_projects) def delete_project_assignments(self, project_id): self.driver.delete_project_assignments(project_id) def delete_role_assignments(self, role_id): self.driver.delete_role_assignments(role_id) def delete_user_assignments(self, user_id): self.driver.delete_user_assignments(user_id) def delete_group_assignments(self, group_id): self.driver.delete_group_assignments(group_id) Driver = manager.create_legacy_driver(AssignmentDriverV8) @dependency.provider('role_api') @dependency.requires('assignment_api') class RoleManager(manager.Manager): """Default pivot point for the Role backend.""" driver_namespace = 'keystone.role' _ROLE = 'role' def __init__(self): # If there is a specific driver specified for role, then use it. # Otherwise retrieve the driver type from the assignment driver. role_driver = CONF.role.driver if role_driver is None: assignment_manager = dependency.get_provider('assignment_api') role_driver = assignment_manager.default_role_driver() super(RoleManager, self).__init__(role_driver) # Make sure it is a driver version we support, and if it is a legacy # driver, then wrap it. 
if isinstance(self.driver, RoleDriverV8): self.driver = V9RoleWrapperForV8Driver(self.driver) elif not isinstance(self.driver, RoleDriverV9): raise exception.UnsupportedDriverVersion(driver=role_driver) @MEMOIZE def get_role(self, role_id): return self.driver.get_role(role_id) def create_role(self, role_id, role, initiator=None): ret = self.driver.create_role(role_id, role) notifications.Audit.created(self._ROLE, role_id, initiator) if MEMOIZE.should_cache(ret): self.get_role.set(ret, self, role_id) return ret @manager.response_truncated def list_roles(self, hints=None): return self.driver.list_roles(hints or driver_hints.Hints()) def update_role(self, role_id, role, initiator=None): original_role = self.driver.get_role(role_id) if ('domain_id' in role and role['domain_id'] != original_role['domain_id']): raise exception.ValidationError( message=_('Update of `domain_id` is not allowed.')) ret = self.driver.update_role(role_id, role) notifications.Audit.updated(self._ROLE, role_id, initiator) self.get_role.invalidate(self, role_id) return ret def delete_role(self, role_id, initiator=None): self.assignment_api.delete_tokens_for_role_assignments(role_id) self.assignment_api.delete_role_assignments(role_id) self.driver.delete_role(role_id) notifications.Audit.deleted(self._ROLE, role_id, initiator) self.get_role.invalidate(self, role_id) COMPUTED_ASSIGNMENTS_REGION.invalidate() # TODO(ayoung): Add notification def create_implied_role(self, prior_role_id, implied_role_id): implied_role = self.driver.get_role(implied_role_id) self.driver.get_role(prior_role_id) if implied_role['name'] in CONF.assignment.prohibited_implied_role: raise exception.InvalidImpliedRole(role_id=implied_role_id) response = self.driver.create_implied_role( prior_role_id, implied_role_id) COMPUTED_ASSIGNMENTS_REGION.invalidate() return response def delete_implied_role(self, prior_role_id, implied_role_id): self.driver.delete_implied_role(prior_role_id, implied_role_id) 
COMPUTED_ASSIGNMENTS_REGION.invalidate() # The RoleDriverBase class is the set of driver methods from earlier # drivers that we still support, that have not been removed or modified. This # class is then used to created the augmented V8 and V9 version abstract driver # classes, without having to duplicate a lot of abstract method signatures. # If you remove a method from V9, then move the abstract methods from this Base # class to the V8 class. Do not modify any of the method signatures in the Base # class - changes should only be made in the V8 and subsequent classes. @six.add_metaclass(abc.ABCMeta) class RoleDriverBase(object): def _get_list_limit(self): return CONF.role.list_limit or CONF.list_limit @abc.abstractmethod def create_role(self, role_id, role): """Creates a new role. :raises keystone.exception.Conflict: If a duplicate role exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_roles(self, hints): """List roles in the system. :param hints: filter hints which the driver should implement if at all possible. :returns: a list of role_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_roles_from_ids(self, role_ids): """List roles for the provided list of ids. :param role_ids: list of ids :returns: a list of role_refs. This method is used internally by the assignment manager to bulk read a set of roles given their ids. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_role(self, role_id): """Get a role by ID. :returns: role_ref :raises keystone.exception.RoleNotFound: If the role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_role(self, role_id, role): """Updates an existing role. :raises keystone.exception.RoleNotFound: If the role doesn't exist. :raises keystone.exception.Conflict: If a duplicate role exists. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_role(self, role_id): """Deletes an existing role. :raises keystone.exception.RoleNotFound: If the role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover class RoleDriverV8(RoleDriverBase): """Removed or redefined methods from V8. Move the abstract methods of any methods removed or modified in later versions of the driver from RoleDriverBase to here. We maintain this so that legacy drivers, which will be a subclass of RoleDriverV8, can still reference them. """ pass class RoleDriverV9(RoleDriverBase): """New or redefined methods from V8. Add any new V9 abstract methods (or those with modified signatures) to this class. """ @abc.abstractmethod def get_implied_role(self, prior_role_id, implied_role_id): """Fetches a role inference rule :raises keystone.exception.ImpliedRoleNotFound: If the implied role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_implied_role(self, prior_role_id, implied_role_id): """Creates a role inference rule :raises: keystone.exception.RoleNotFound: If the role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_implied_role(self, prior_role_id, implied_role_id): """Deletes a role inference rule :raises keystone.exception.ImpliedRoleNotFound: If the implied role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_role_inference_rules(self): """Lists all the rules used to imply one role from another""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_implied_roles(self, prior_role_id): """Lists roles implied from the prior role ID""" raise exception.NotImplemented() # pragma: no cover class V9RoleWrapperForV8Driver(RoleDriverV9): """Wrapper class to supported a V8 legacy driver. 
In order to support legacy drivers without having to make the manager code driver-version aware, we wrap legacy drivers so that they look like the latest version. For the various changes made in a new driver, here are the actions needed in this wrapper: Method removed from new driver - remove the call-through method from this class, since the manager will no longer be calling it. Method signature (or meaning) changed - wrap the old method in a new signature here, and munge the input and output parameters accordingly. New method added to new driver - add a method to implement the new functionality here if possible. If that is not possible, then return NotImplemented, since we do not guarantee to support new functionality with legacy drivers. This V8 wrapper contains the following support for newer manager code: - The current manager code expects a role entity to have a domain_id attribute, with a non-None value indicating a domain specific role. V8 drivers will only understand global roles, hence if a non-None domain_id is passed to this wrapper, it will raise a NotImplemented exception. If a None-valued domain_id is passed in, it will be trimmed off before the underlying driver is called (and a None-valued domain_id attribute is added in for any entities returned to the manager. 
""" @versionutils.deprecated( as_of=versionutils.deprecated.MITAKA, what='keystone.assignment.RoleDriverV8', in_favor_of='keystone.assignment.RoleDriverV9', remove_in=+2) def __init__(self, wrapped_driver): self.driver = wrapped_driver def _append_null_domain_id(self, role_or_list): def _append_null_domain_id_to_dict(role): if 'domain_id' not in role: role['domain_id'] = None return role if isinstance(role_or_list, list): return [_append_null_domain_id_to_dict(x) for x in role_or_list] else: return _append_null_domain_id_to_dict(role_or_list) def _trim_and_assert_null_domain_id(self, role): if 'domain_id' in role: if role['domain_id'] is not None: raise exception.NotImplemented( _('Domain specific roles are not supported in the V8 ' 'role driver')) else: new_role = role.copy() new_role.pop('domain_id') return new_role else: return role def create_role(self, role_id, role): new_role = self._trim_and_assert_null_domain_id(role) return self._append_null_domain_id( self.driver.create_role(role_id, new_role)) def list_roles(self, hints): return self._append_null_domain_id(self.driver.list_roles(hints)) def list_roles_from_ids(self, role_ids): return self._append_null_domain_id( self.driver.list_roles_from_ids(role_ids)) def get_role(self, role_id): return self._append_null_domain_id(self.driver.get_role(role_id)) def update_role(self, role_id, role): update_role = self._trim_and_assert_null_domain_id(role) return self._append_null_domain_id( self.driver.update_role(role_id, update_role)) def delete_role(self, role_id): self.driver.delete_role(role_id) def get_implied_role(self, prior_role_id, implied_role_id): raise exception.NotImplemented() # pragma: no cover def create_implied_role(self, prior_role_id, implied_role_id): raise exception.NotImplemented() # pragma: no cover def delete_implied_role(self, prior_role_id, implied_role_id): raise exception.NotImplemented() # pragma: no cover def list_implied_roles(self, prior_role_id): raise exception.NotImplemented() # 
pragma: no cover def list_role_inference_rules(self): raise exception.NotImplemented() # pragma: no cover RoleDriver = manager.create_legacy_driver(RoleDriverV8) keystone-9.0.0/keystone/assignment/controllers.py0000664000567000056710000011535112701407102023453 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud, Inc. # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Workflow Logic the Assignment service.""" import functools import uuid from oslo_config import cfg from oslo_log import log from six.moves import urllib from keystone.assignment import schema from keystone.common import controller from keystone.common import dependency from keystone.common import utils from keystone.common import validation from keystone.common import wsgi from keystone import exception from keystone.i18n import _ from keystone import notifications CONF = cfg.CONF LOG = log.getLogger(__name__) @dependency.requires('assignment_api', 'identity_api', 'token_provider_api') class TenantAssignment(controller.V2Controller): """The V2 Project APIs that are processing assignments.""" @controller.v2_auth_deprecated def get_projects_for_token(self, context, **kw): """Get valid tenants for token based on token used to authenticate. Pulls the token from the context, validates it and gets the valid tenants for the user in the token. Doesn't care about token scopedness. 
""" token_ref = utils.get_token_ref(context) tenant_refs = ( self.assignment_api.list_projects_for_user(token_ref.user_id)) tenant_refs = [self.v3_to_v2_project(ref) for ref in tenant_refs if ref['domain_id'] == CONF.identity.default_domain_id] params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self.format_project_list(tenant_refs, **params) @controller.v2_deprecated def get_project_users(self, context, tenant_id, **kw): self.assert_admin(context) user_refs = [] user_ids = self.assignment_api.list_user_ids_for_project(tenant_id) for user_id in user_ids: try: user_ref = self.identity_api.get_user(user_id) except exception.UserNotFound: # Log that user is missing and continue on. message = ("User %(user_id)s in project %(project_id)s " "doesn't exist.") LOG.debug(message, {'user_id': user_id, 'project_id': tenant_id}) else: user_refs.append(self.v3_to_v2_user(user_ref)) return {'users': user_refs} @dependency.requires('assignment_api', 'role_api') class Role(controller.V2Controller): """The Role management APIs.""" @controller.v2_deprecated def get_role(self, context, role_id): self.assert_admin(context) return {'role': self.role_api.get_role(role_id)} @controller.v2_deprecated def create_role(self, context, role): role = self._normalize_dict(role) self.assert_admin(context) if 'name' not in role or not role['name']: msg = _('Name field is required and cannot be empty') raise exception.ValidationError(message=msg) if role['name'] == CONF.member_role_name: # Use the configured member role ID when creating the configured # member role name. This avoids the potential of creating a # "member" role with an unexpected ID. 
role_id = CONF.member_role_id else: role_id = uuid.uuid4().hex role['id'] = role_id initiator = notifications._get_request_audit_info(context) role_ref = self.role_api.create_role(role_id, role, initiator) return {'role': role_ref} @controller.v2_deprecated def delete_role(self, context, role_id): self.assert_admin(context) initiator = notifications._get_request_audit_info(context) self.role_api.delete_role(role_id, initiator) @controller.v2_deprecated def get_roles(self, context): self.assert_admin(context) return {'roles': self.role_api.list_roles()} @dependency.requires('assignment_api', 'resource_api', 'role_api') class RoleAssignmentV2(controller.V2Controller): """The V2 Role APIs that are processing assignments.""" # COMPAT(essex-3) @controller.v2_deprecated def get_user_roles(self, context, user_id, tenant_id=None): """Get the roles for a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) # NOTE(davechen): Router without project id is defined, # but we don't plan on implementing this. if tenant_id is None: raise exception.NotImplemented( message=_('User roles not supported: tenant_id required')) roles = self.assignment_api.get_roles_for_user_and_project( user_id, tenant_id) return {'roles': [self.role_api.get_role(x) for x in roles]} @controller.v2_deprecated def add_role_to_user(self, context, user_id, role_id, tenant_id=None): """Add a role to a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented( message=_('User roles not supported: tenant_id required')) self.assignment_api.add_role_to_user_and_project( user_id, tenant_id, role_id) role_ref = self.role_api.get_role(role_id) return {'role': role_ref} @controller.v2_deprecated def remove_role_from_user(self, context, user_id, role_id, tenant_id=None): """Remove a role from a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented( message=_('User roles not supported: tenant_id required')) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant, so we must follow up on that self.assignment_api.remove_role_from_user_and_project( user_id, tenant_id, role_id) # COMPAT(diablo): CRUD extension @controller.v2_deprecated def get_role_refs(self, context, user_id): """Ultimate hack to get around having to make role_refs first-class. This will basically iterate over the various roles the user has in all tenants the user is a member of and create fake role_refs where the id encodes the user-tenant-role information so we can look up the appropriate data when we need to delete them. """ self.assert_admin(context) tenants = self.assignment_api.list_projects_for_user(user_id) o = [] for tenant in tenants: # As a v2 call, we should limit the response to those projects in # the default domain. 
if tenant['domain_id'] != CONF.identity.default_domain_id: continue role_ids = self.assignment_api.get_roles_for_user_and_project( user_id, tenant['id']) for role_id in role_ids: ref = {'roleId': role_id, 'tenantId': tenant['id'], 'userId': user_id} ref['id'] = urllib.parse.urlencode(ref) o.append(ref) return {'roles': o} # COMPAT(diablo): CRUD extension @controller.v2_deprecated def create_role_ref(self, context, user_id, role): """This is actually used for adding a user to a tenant. In the legacy data model adding a user to a tenant required setting a role. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role tenant_id = role.get('tenantId') role_id = role.get('roleId') self.assignment_api.add_role_to_user_and_project( user_id, tenant_id, role_id) role_ref = self.role_api.get_role(role_id) return {'role': role_ref} # COMPAT(diablo): CRUD extension @controller.v2_deprecated def delete_role_ref(self, context, user_id, role_ref_id): """This is actually used for deleting a user from a tenant. In the legacy data model removing a user from a tenant required deleting a role. To emulate this, we encode the tenant and role in the role_ref_id, and if this happens to be the last role for the user-tenant pair, we remove the user from the tenant. 
""" self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role role_ref_ref = urllib.parse.parse_qs(role_ref_id) tenant_id = role_ref_ref.get('tenantId')[0] role_id = role_ref_ref.get('roleId')[0] self.assignment_api.remove_role_from_user_and_project( user_id, tenant_id, role_id) @dependency.requires('assignment_api', 'resource_api') class ProjectAssignmentV3(controller.V3Controller): """The V3 Project APIs that are processing assignments.""" collection_name = 'projects' member_name = 'project' def __init__(self): super(ProjectAssignmentV3, self).__init__() self.get_member_from_driver = self.resource_api.get_project @controller.filterprotected('domain_id', 'enabled', 'name') def list_user_projects(self, context, filters, user_id): hints = ProjectAssignmentV3.build_driver_hints(context, filters) refs = self.assignment_api.list_projects_for_user(user_id, hints=hints) return ProjectAssignmentV3.wrap_collection(context, refs, hints=hints) @dependency.requires('role_api') class RoleV3(controller.V3Controller): """The V3 Role CRUD APIs. To ease complexity (and hence risk) in writing the policy rules for the role APIs, we create separate policy actions for roles that are domain specific, as opposed to those that are global. In order to achieve this each of the role API methods has a wrapper method that checks to see if the role is global or domain specific. NOTE (henry-nash): If this separate global vs scoped policy action pattern becomes repeated for other entities, we should consider encapsulating this into a specialized router class. 
""" collection_name = 'roles' member_name = 'role' def __init__(self): super(RoleV3, self).__init__() self.get_member_from_driver = self.role_api.get_role def _is_domain_role(self, role): return role.get('domain_id') is not None def _is_domain_role_target(self, role_id): try: role = self.role_api.get_role(role_id) except exception.RoleNotFound: # We hide this error since we have not yet carried out a policy # check - and it maybe that the caller isn't authorized to make # this call. If so, we want that error to be raised instead. return False return self._is_domain_role(role) def create_role_wrapper(self, context, role): if self._is_domain_role(role): return self.create_domain_role(context, role=role) else: return self.create_role(context, role=role) @controller.protected() @validation.validated(schema.role_create, 'role') def create_role(self, context, role): return self._create_role(context, role) @controller.protected() @validation.validated(schema.role_create, 'role') def create_domain_role(self, context, role): return self._create_role(context, role) def list_roles_wrapper(self, context): # If there is no domain_id filter defined, then we only want to return # global roles, so we set the domain_id filter to None. 
params = context['query_string'] if 'domain_id' not in params: context['query_string']['domain_id'] = None if context['query_string']['domain_id'] is not None: return self.list_domain_roles(context) else: return self.list_roles(context) @controller.filterprotected('name', 'domain_id') def list_roles(self, context, filters): return self._list_roles(context, filters) @controller.filterprotected('name', 'domain_id') def list_domain_roles(self, context, filters): return self._list_roles(context, filters) def get_role_wrapper(self, context, role_id): if self._is_domain_role_target(role_id): return self.get_domain_role(context, role_id=role_id) else: return self.get_role(context, role_id=role_id) @controller.protected() def get_role(self, context, role_id): return self._get_role(context, role_id) @controller.protected() def get_domain_role(self, context, role_id): return self._get_role(context, role_id) def update_role_wrapper(self, context, role_id, role): # Since we don't allow you change whether a role is global or domain # specific, we can ignore the new update attributes and just look at # the existing role. 
if self._is_domain_role_target(role_id): return self.update_domain_role( context, role_id=role_id, role=role) else: return self.update_role(context, role_id=role_id, role=role) @controller.protected() @validation.validated(schema.role_update, 'role') def update_role(self, context, role_id, role): return self._update_role(context, role_id, role) @controller.protected() @validation.validated(schema.role_update, 'role') def update_domain_role(self, context, role_id, role): return self._update_role(context, role_id, role) def delete_role_wrapper(self, context, role_id): if self._is_domain_role_target(role_id): return self.delete_domain_role(context, role_id=role_id) else: return self.delete_role(context, role_id=role_id) @controller.protected() def delete_role(self, context, role_id): return self._delete_role(context, role_id) @controller.protected() def delete_domain_role(self, context, role_id): return self._delete_role(context, role_id) def _create_role(self, context, role): if role['name'] == CONF.member_role_name: # Use the configured member role ID when creating the configured # member role name. This avoids the potential of creating a # "member" role with an unexpected ID. 
role['id'] = CONF.member_role_id else: role = self._assign_unique_id(role) ref = self._normalize_dict(role) initiator = notifications._get_request_audit_info(context) ref = self.role_api.create_role(ref['id'], ref, initiator) return RoleV3.wrap_member(context, ref) def _list_roles(self, context, filters): hints = RoleV3.build_driver_hints(context, filters) refs = self.role_api.list_roles( hints=hints) return RoleV3.wrap_collection(context, refs, hints=hints) def _get_role(self, context, role_id): ref = self.role_api.get_role(role_id) return RoleV3.wrap_member(context, ref) def _update_role(self, context, role_id, role): self._require_matching_id(role_id, role) initiator = notifications._get_request_audit_info(context) ref = self.role_api.update_role(role_id, role, initiator) return RoleV3.wrap_member(context, ref) def _delete_role(self, context, role_id): initiator = notifications._get_request_audit_info(context) self.role_api.delete_role(role_id, initiator) @dependency.requires('role_api') class ImpliedRolesV3(controller.V3Controller): """The V3 ImpliedRoles CRD APIs. 
There is no Update.""" def _prior_role_stanza(self, endpoint, prior_role_id, prior_role_name): return { "id": prior_role_id, "links": { "self": endpoint + "/v3/roles/" + prior_role_id }, "name": prior_role_name } def _implied_role_stanza(self, endpoint, implied_role): implied_id = implied_role['id'] implied_response = { "id": implied_id, "links": { "self": endpoint + "/v3/roles/" + implied_id }, "name": implied_role['name'] } return implied_response def _populate_prior_role_response(self, endpoint, prior_id): prior_role = self.role_api.get_role(prior_id) response = { "role_inference": { "prior_role": self._prior_role_stanza( endpoint, prior_id, prior_role['name']) } } return response def _populate_implied_roles_response(self, endpoint, prior_id, implied_ids): response = self._populate_prior_role_response(endpoint, prior_id) response["role_inference"]['implies'] = [] for implied_id in implied_ids: implied_role = self.role_api.get_role(implied_id) implied_response = self._implied_role_stanza( endpoint, implied_role) response["role_inference"]['implies'].append(implied_response) return response def _populate_implied_role_response(self, endpoint, prior_id, implied_id): response = self._populate_prior_role_response(endpoint, prior_id) implied_role = self.role_api.get_role(implied_id) stanza = self._implied_role_stanza(endpoint, implied_role) response["role_inference"]['implies'] = stanza return response @controller.protected() def get_implied_role(self, context, prior_role_id, implied_role_id): ref = self.role_api.get_implied_role(prior_role_id, implied_role_id) prior_id = ref['prior_role_id'] implied_id = ref['implied_role_id'] endpoint = super(controller.V3Controller, ImpliedRolesV3).base_url( context, 'public') response = self._populate_implied_role_response( endpoint, prior_id, implied_id) return response @controller.protected() def check_implied_role(self, context, prior_role_id, implied_role_id): self.role_api.get_implied_role(prior_role_id, implied_role_id) 
@controller.protected() def create_implied_role(self, context, prior_role_id, implied_role_id): self.role_api.create_implied_role(prior_role_id, implied_role_id) return wsgi.render_response( self.get_implied_role(context, prior_role_id, implied_role_id), status=(201, 'Created')) @controller.protected() def delete_implied_role(self, context, prior_role_id, implied_role_id): self.role_api.delete_implied_role(prior_role_id, implied_role_id) @controller.protected() def list_implied_roles(self, context, prior_role_id): ref = self.role_api.list_implied_roles(prior_role_id) implied_ids = [r['implied_role_id'] for r in ref] endpoint = super(controller.V3Controller, ImpliedRolesV3).base_url( context, 'public') results = self._populate_implied_roles_response( endpoint, prior_role_id, implied_ids) return results @controller.protected() def list_role_inference_rules(self, context): refs = self.role_api.list_role_inference_rules() role_dict = {role_ref['id']: role_ref for role_ref in self.role_api.list_roles()} rules = dict() endpoint = super(controller.V3Controller, ImpliedRolesV3).base_url( context, 'public') for ref in refs: implied_role_id = ref['implied_role_id'] prior_role_id = ref['prior_role_id'] implied = rules.get(prior_role_id, []) implied.append(self._implied_role_stanza( endpoint, role_dict[implied_role_id])) rules[prior_role_id] = implied inferences = [] for prior_id, implied in rules.items(): prior_response = self._prior_role_stanza( endpoint, prior_id, role_dict[prior_id]['name']) inferences.append({'prior_role': prior_response, 'implies': implied}) results = {'role_inferences': inferences} return results @dependency.requires('assignment_api', 'identity_api', 'resource_api', 'role_api') class GrantAssignmentV3(controller.V3Controller): """The V3 Grant Assignment APIs.""" collection_name = 'roles' member_name = 'role' def __init__(self): super(GrantAssignmentV3, self).__init__() self.get_member_from_driver = self.role_api.get_role def 
_require_domain_xor_project(self, domain_id, project_id): if domain_id and project_id: msg = _('Specify a domain or project, not both') raise exception.ValidationError(msg) if not domain_id and not project_id: msg = _('Specify one of domain or project') raise exception.ValidationError(msg) def _require_user_xor_group(self, user_id, group_id): if user_id and group_id: msg = _('Specify a user or group, not both') raise exception.ValidationError(msg) if not user_id and not group_id: msg = _('Specify one of user or group') raise exception.ValidationError(msg) def _check_if_inherited(self, context): return (CONF.os_inherit.enabled and context['path'].startswith('/OS-INHERIT') and context['path'].endswith('/inherited_to_projects')) def _check_grant_protection(self, context, protection, role_id=None, user_id=None, group_id=None, domain_id=None, project_id=None, allow_no_user=False): """Check protection for role grant APIs. The policy rule might want to inspect attributes of any of the entities involved in the grant. So we get these and pass them to the check_protection() handler in the controller. 
""" ref = {} if role_id: ref['role'] = self.role_api.get_role(role_id) if user_id: try: ref['user'] = self.identity_api.get_user(user_id) except exception.UserNotFound: if not allow_no_user: raise else: ref['group'] = self.identity_api.get_group(group_id) if domain_id: ref['domain'] = self.resource_api.get_domain(domain_id) else: ref['project'] = self.resource_api.get_project(project_id) self.check_protection(context, protection, ref) @controller.protected(callback=_check_grant_protection) def create_grant(self, context, role_id, user_id=None, group_id=None, domain_id=None, project_id=None): """Grants a role to a user or group on either a domain or project.""" self._require_domain_xor_project(domain_id, project_id) self._require_user_xor_group(user_id, group_id) self.assignment_api.create_grant( role_id, user_id, group_id, domain_id, project_id, self._check_if_inherited(context), context) @controller.protected(callback=_check_grant_protection) def list_grants(self, context, user_id=None, group_id=None, domain_id=None, project_id=None): """Lists roles granted to user/group on either a domain or project.""" self._require_domain_xor_project(domain_id, project_id) self._require_user_xor_group(user_id, group_id) refs = self.assignment_api.list_grants( user_id, group_id, domain_id, project_id, self._check_if_inherited(context)) return GrantAssignmentV3.wrap_collection(context, refs) @controller.protected(callback=_check_grant_protection) def check_grant(self, context, role_id, user_id=None, group_id=None, domain_id=None, project_id=None): """Checks if a role has been granted on either a domain or project.""" self._require_domain_xor_project(domain_id, project_id) self._require_user_xor_group(user_id, group_id) self.assignment_api.get_grant( role_id, user_id, group_id, domain_id, project_id, self._check_if_inherited(context)) # NOTE(lbragstad): This will allow users to clean up role assignments # from the backend in the event the user was removed prior to the role # 
assignment being removed. @controller.protected(callback=functools.partial( _check_grant_protection, allow_no_user=True)) def revoke_grant(self, context, role_id, user_id=None, group_id=None, domain_id=None, project_id=None): """Revokes a role from user/group on either a domain or project.""" self._require_domain_xor_project(domain_id, project_id) self._require_user_xor_group(user_id, group_id) self.assignment_api.delete_grant( role_id, user_id, group_id, domain_id, project_id, self._check_if_inherited(context), context) @dependency.requires('assignment_api', 'identity_api', 'resource_api') class RoleAssignmentV3(controller.V3Controller): """The V3 Role Assignment APIs, really just list_role_assignment().""" # TODO(henry-nash): The current implementation does not provide a full # first class entity for role-assignment. There is no role_assignment_id # and only the list_role_assignment call is supported. Further, since it # is not a first class entity, the links for the individual entities # reference the individual role grant APIs. collection_name = 'role_assignments' member_name = 'role_assignment' @classmethod def wrap_member(cls, context, ref): # NOTE(henry-nash): Since we are not yet a true collection, we override # the wrapper as have already included the links in the entities pass def _format_entity(self, context, entity): """Format an assignment entity for API response. The driver layer returns entities as dicts containing the ids of the actor (e.g. user or group), target (e.g. domain or project) and role. If it is an inherited role, then this is also indicated. 
Examples: For a non-inherited expanded assignment from group membership: {'user_id': user_id, 'project_id': project_id, 'role_id': role_id, 'indirect': {'group_id': group_id}} or, for a project inherited role: {'user_id': user_id, 'project_id': project_id, 'role_id': role_id, 'indirect': {'project_id': parent_id}} or, for a role that was implied by a prior role: {'user_id': user_id, 'project_id': project_id, 'role_id': role_id, 'indirect': {'role_id': prior role_id}} It is possible to deduce if a role assignment came from group membership if it has both 'user_id' in the main body of the dict and 'group_id' in the 'indirect' subdict, as well as it is possible to deduce if it has come from inheritance if it contains both a 'project_id' in the main body of the dict and 'parent_id' in the 'indirect' subdict. This function maps this into the format to be returned via the API, e.g. for the second example above: { 'user': { {'id': user_id} }, 'scope': { 'project': { {'id': project_id} }, 'OS-INHERIT:inherited_to': 'projects' }, 'role': { {'id': role_id} }, 'links': { 'assignment': '/OS-INHERIT/projects/parent_id/users/user_id/' 'roles/role_id/inherited_to_projects' } } """ formatted_entity = {'links': {}} inherited_assignment = entity.get('inherited_to_projects') if 'project_id' in entity: if 'project_name' in entity: formatted_entity['scope'] = {'project': { 'id': entity['project_id'], 'name': entity['project_name'], 'domain': {'id': entity['project_domain_id'], 'name': entity['project_domain_name']}}} else: formatted_entity['scope'] = { 'project': {'id': entity['project_id']}} if 'domain_id' in entity.get('indirect', {}): inherited_assignment = True formatted_link = ('/domains/%s' % entity['indirect']['domain_id']) elif 'project_id' in entity.get('indirect', {}): inherited_assignment = True formatted_link = ('/projects/%s' % entity['indirect']['project_id']) else: formatted_link = '/projects/%s' % entity['project_id'] elif 'domain_id' in entity: if 'domain_name' in 
entity: formatted_entity['scope'] = { 'domain': {'id': entity['domain_id'], 'name': entity['domain_name']}} else: formatted_entity['scope'] = { 'domain': {'id': entity['domain_id']}} formatted_link = '/domains/%s' % entity['domain_id'] if 'user_id' in entity: if 'user_name' in entity: formatted_entity['user'] = { 'id': entity['user_id'], 'name': entity['user_name'], 'domain': {'id': entity['user_domain_id'], 'name': entity['user_domain_name']}} else: formatted_entity['user'] = {'id': entity['user_id']} if 'group_id' in entity.get('indirect', {}): membership_url = ( self.base_url(context, '/groups/%s/users/%s' % ( entity['indirect']['group_id'], entity['user_id']))) formatted_entity['links']['membership'] = membership_url formatted_link += '/groups/%s' % entity['indirect']['group_id'] else: formatted_link += '/users/%s' % entity['user_id'] elif 'group_id' in entity: if 'group_name' in entity: formatted_entity['group'] = { 'id': entity['group_id'], 'name': entity['group_name'], 'domain': {'id': entity['group_domain_id'], 'name': entity['group_domain_name']}} else: formatted_entity['group'] = {'id': entity['group_id']} formatted_link += '/groups/%s' % entity['group_id'] if 'role_name' in entity: formatted_entity['role'] = {'id': entity['role_id'], 'name': entity['role_name']} else: formatted_entity['role'] = {'id': entity['role_id']} prior_role_link = '' if 'role_id' in entity.get('indirect', {}): formatted_link += '/roles/%s' % entity['indirect']['role_id'] prior_role_link = ( '/prior_role/%(prior)s/implies/%(implied)s' % { 'prior': entity['role_id'], 'implied': entity['indirect']['role_id'] }) else: formatted_link += '/roles/%s' % entity['role_id'] if inherited_assignment: formatted_entity['scope']['OS-INHERIT:inherited_to'] = ( 'projects') formatted_link = ('/OS-INHERIT%s/inherited_to_projects' % formatted_link) formatted_entity['links']['assignment'] = self.base_url(context, formatted_link) if prior_role_link: formatted_entity['links']['prior_role'] = ( 
self.base_url(context, prior_role_link)) return formatted_entity def _assert_effective_filters(self, inherited, group, domain): """Assert that useless filter combinations are avoided. In effective mode, the following filter combinations are useless, since they would always return an empty list of role assignments: - group id, since no group assignment is returned in effective mode; - domain id and inherited, since no domain inherited assignment is returned in effective mode. """ if group: msg = _('Combining effective and group filter will always ' 'result in an empty list.') raise exception.ValidationError(msg) if inherited and domain: msg = _('Combining effective, domain and inherited filters will ' 'always result in an empty list.') raise exception.ValidationError(msg) def _assert_domain_nand_project(self, domain_id, project_id): if domain_id and project_id: msg = _('Specify a domain or project, not both') raise exception.ValidationError(msg) def _assert_user_nand_group(self, user_id, group_id): if user_id and group_id: msg = _('Specify a user or group, not both') raise exception.ValidationError(msg) def _list_role_assignments(self, context, filters, include_subtree=False): """List role assignments to user and groups on domains and projects. Return a list of all existing role assignments in the system, filtered by assignments attributes, if provided. If effective option is used and OS-INHERIT extension is enabled, the following functions will be applied: 1) For any group role assignment on a target, replace it by a set of role assignments containing one for each user of that group on that target; 2) For any inherited role assignment for an actor on a target, replace it by a set of role assignments for that actor on every project under that target. It means that, if effective mode is used, no group or domain inherited assignments will be present in the resultant list. Thus, combining effective with them is invalid. 
As a role assignment contains only one actor and one target, providing both user and group ids or domain and project ids is invalid as well. """ params = context['query_string'] effective = 'effective' in params and ( self.query_filter_is_true(params['effective'])) include_names = ('include_names' in params and self.query_filter_is_true(params['include_names'])) if 'scope.OS-INHERIT:inherited_to' in params: inherited = ( params['scope.OS-INHERIT:inherited_to'] == 'projects') else: # None means querying both inherited and direct assignments inherited = None self._assert_domain_nand_project(params.get('scope.domain.id'), params.get('scope.project.id')) self._assert_user_nand_group(params.get('user.id'), params.get('group.id')) if effective: self._assert_effective_filters(inherited=inherited, group=params.get('group.id'), domain=params.get( 'scope.domain.id')) refs = self.assignment_api.list_role_assignments( role_id=params.get('role.id'), user_id=params.get('user.id'), group_id=params.get('group.id'), domain_id=params.get('scope.domain.id'), project_id=params.get('scope.project.id'), include_subtree=include_subtree, inherited=inherited, effective=effective, include_names=include_names) formatted_refs = [self._format_entity(context, ref) for ref in refs] return self.wrap_collection(context, formatted_refs) @controller.filterprotected('group.id', 'role.id', 'scope.domain.id', 'scope.project.id', 'scope.OS-INHERIT:inherited_to', 'user.id') def list_role_assignments(self, context, filters): return self._list_role_assignments(context, filters) def _check_list_tree_protection(self, context, protection_info): """Check protection for list assignment for tree API. The policy rule might want to inspect the domain of any project filter so if one is defined, then load the project ref and pass it to the check protection method. 
""" ref = {} for filter, value in protection_info['filter_attr'].items(): if filter == 'scope.project.id' and value: ref['project'] = self.resource_api.get_project(value) self.check_protection(context, protection_info, ref) @controller.filterprotected('group.id', 'role.id', 'scope.domain.id', 'scope.project.id', 'scope.OS-INHERIT:inherited_to', 'user.id', callback=_check_list_tree_protection) def list_role_assignments_for_tree(self, context, filters): if not context['query_string'].get('scope.project.id'): msg = _('scope.project.id must be specified if include_subtree ' 'is also specified') raise exception.ValidationError(message=msg) return self._list_role_assignments(context, filters, include_subtree=True) def list_role_assignments_wrapper(self, context): """Main entry point from router for list role assignments. Since we want different policy file rules to be applicable based on whether there the include_subtree query parameter is part of the API call, this method checks for this and then calls the appropriate protected entry point. """ params = context['query_string'] if 'include_subtree' in params and ( self.query_filter_is_true(params['include_subtree'])): return self.list_role_assignments_for_tree(context) else: return self.list_role_assignments(context) keystone-9.0.0/keystone/assignment/routers.py0000664000567000056710000002753412701407105022620 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud, Inc. # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """WSGI Routers for the Assignment service.""" import functools from oslo_config import cfg from keystone.assignment import controllers from keystone.common import json_home from keystone.common import router from keystone.common import wsgi CONF = cfg.CONF build_os_inherit_relation = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-INHERIT', extension_version='1.0') class Public(wsgi.ComposableRouter): def add_routes(self, mapper): tenant_controller = controllers.TenantAssignment() mapper.connect('/tenants', controller=tenant_controller, action='get_projects_for_token', conditions=dict(method=['GET'])) class Admin(wsgi.ComposableRouter): def add_routes(self, mapper): # Role Operations roles_controller = controllers.RoleAssignmentV2() mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) mapper.connect('/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) class Routers(wsgi.RoutersBase): def append_v3_routers(self, mapper, routers): project_controller = controllers.ProjectAssignmentV3() self._add_resource( mapper, project_controller, path='/users/{user_id}/projects', get_action='list_user_projects', rel=json_home.build_v3_resource_relation('user_projects'), path_vars={ 'user_id': json_home.Parameters.USER_ID, }) routers.append( router.Router(controllers.RoleV3(), 'roles', 'role', resource_descriptions=self.v3_resources, method_template='%s_wrapper')) implied_roles_controller = controllers.ImpliedRolesV3() self._add_resource( mapper, implied_roles_controller, path='/roles/{prior_role_id}/implies', rel=json_home.build_v3_resource_relation('implied_roles'), get_action='list_implied_roles', status=json_home.Status.EXPERIMENTAL, path_vars={ 'prior_role_id': 
json_home.Parameters.ROLE_ID, } ) self._add_resource( mapper, implied_roles_controller, path='/roles/{prior_role_id}/implies/{implied_role_id}', put_action='create_implied_role', delete_action='delete_implied_role', head_action='check_implied_role', get_action='get_implied_role', rel=json_home.build_v3_resource_relation('implied_role'), status=json_home.Status.EXPERIMENTAL, path_vars={ 'prior_role_id': json_home.Parameters.ROLE_ID, 'implied_role_id': json_home.Parameters.ROLE_ID } ) self._add_resource( mapper, implied_roles_controller, path='/role_inferences', get_action='list_role_inference_rules', rel=json_home.build_v3_resource_relation('role_inferences'), status=json_home.Status.EXPERIMENTAL, path_vars={} ) grant_controller = controllers.GrantAssignmentV3() self._add_resource( mapper, grant_controller, path='/projects/{project_id}/users/{user_id}/roles/{role_id}', get_head_action='check_grant', put_action='create_grant', delete_action='revoke_grant', rel=json_home.build_v3_resource_relation('project_user_role'), path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }) self._add_resource( mapper, grant_controller, path='/projects/{project_id}/groups/{group_id}/roles/{role_id}', get_head_action='check_grant', put_action='create_grant', delete_action='revoke_grant', rel=json_home.build_v3_resource_relation('project_group_role'), path_vars={ 'group_id': json_home.Parameters.GROUP_ID, 'project_id': json_home.Parameters.PROJECT_ID, 'role_id': json_home.Parameters.ROLE_ID, }) self._add_resource( mapper, grant_controller, path='/projects/{project_id}/users/{user_id}/roles', get_action='list_grants', rel=json_home.build_v3_resource_relation('project_user_roles'), path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'user_id': json_home.Parameters.USER_ID, }) self._add_resource( mapper, grant_controller, path='/projects/{project_id}/groups/{group_id}/roles', 
get_action='list_grants', rel=json_home.build_v3_resource_relation('project_group_roles'), path_vars={ 'group_id': json_home.Parameters.GROUP_ID, 'project_id': json_home.Parameters.PROJECT_ID, }) self._add_resource( mapper, grant_controller, path='/domains/{domain_id}/users/{user_id}/roles/{role_id}', get_head_action='check_grant', put_action='create_grant', delete_action='revoke_grant', rel=json_home.build_v3_resource_relation('domain_user_role'), path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }) self._add_resource( mapper, grant_controller, path='/domains/{domain_id}/groups/{group_id}/roles/{role_id}', get_head_action='check_grant', put_action='create_grant', delete_action='revoke_grant', rel=json_home.build_v3_resource_relation('domain_group_role'), path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }) self._add_resource( mapper, grant_controller, path='/domains/{domain_id}/users/{user_id}/roles', get_action='list_grants', rel=json_home.build_v3_resource_relation('domain_user_roles'), path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'user_id': json_home.Parameters.USER_ID, }) self._add_resource( mapper, grant_controller, path='/domains/{domain_id}/groups/{group_id}/roles', get_action='list_grants', rel=json_home.build_v3_resource_relation('domain_group_roles'), path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, }) self._add_resource( mapper, controllers.RoleAssignmentV3(), path='/role_assignments', get_action='list_role_assignments_wrapper', rel=json_home.build_v3_resource_relation('role_assignments')) if CONF.os_inherit.enabled: self._add_resource( mapper, grant_controller, path='/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/' '{role_id}/inherited_to_projects', get_head_action='check_grant', put_action='create_grant', 
delete_action='revoke_grant', rel=build_os_inherit_relation( resource_name='domain_user_role_inherited_to_projects'), path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }) self._add_resource( mapper, grant_controller, path='/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/' '{role_id}/inherited_to_projects', get_head_action='check_grant', put_action='create_grant', delete_action='revoke_grant', rel=build_os_inherit_relation( resource_name='domain_group_role_inherited_to_projects'), path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }) self._add_resource( mapper, grant_controller, path='/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/' 'inherited_to_projects', get_action='list_grants', rel=build_os_inherit_relation( resource_name='domain_group_roles_inherited_to_projects'), path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, }) self._add_resource( mapper, grant_controller, path='/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/' 'inherited_to_projects', get_action='list_grants', rel=build_os_inherit_relation( resource_name='domain_user_roles_inherited_to_projects'), path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'user_id': json_home.Parameters.USER_ID, }) self._add_resource( mapper, grant_controller, path='/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/' '{role_id}/inherited_to_projects', get_head_action='check_grant', put_action='create_grant', delete_action='revoke_grant', rel=build_os_inherit_relation( resource_name='project_user_role_inherited_to_projects'), path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'user_id': json_home.Parameters.USER_ID, 'role_id': json_home.Parameters.ROLE_ID, }) self._add_resource( mapper, grant_controller, 
path='/OS-INHERIT/projects/{project_id}/groups/{group_id}/' 'roles/{role_id}/inherited_to_projects', get_head_action='check_grant', put_action='create_grant', delete_action='revoke_grant', rel=build_os_inherit_relation( resource_name='project_group_role_inherited_to_projects'), path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }) keystone-9.0.0/keystone/assignment/V8_backends/0000775000567000056710000000000012701407246022665 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/assignment/V8_backends/__init__.py0000664000567000056710000000000012701407102024753 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/assignment/V8_backends/sql.py0000664000567000056710000004405012701407102024030 0ustar jenkinsjenkins00000000000000# Copyright 2012-13 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg import sqlalchemy from sqlalchemy.sql.expression import false from keystone import assignment as keystone_assignment from keystone.common import sql from keystone import exception from keystone.i18n import _ CONF = cfg.CONF class AssignmentType(object): USER_PROJECT = 'UserProject' GROUP_PROJECT = 'GroupProject' USER_DOMAIN = 'UserDomain' GROUP_DOMAIN = 'GroupDomain' @classmethod def calculate_type(cls, user_id, group_id, project_id, domain_id): if user_id: if project_id: return cls.USER_PROJECT if domain_id: return cls.USER_DOMAIN if group_id: if project_id: return cls.GROUP_PROJECT if domain_id: return cls.GROUP_DOMAIN # Invalid parameters combination raise exception.AssignmentTypeCalculationError(**locals()) class Assignment(keystone_assignment.AssignmentDriverV8): def default_role_driver(self): return 'sql' def default_resource_driver(self): return 'sql' def list_user_ids_for_project(self, tenant_id): with sql.session_for_read() as session: query = session.query(RoleAssignment.actor_id) query = query.filter_by(type=AssignmentType.USER_PROJECT) query = query.filter_by(target_id=tenant_id) query = query.distinct('actor_id') assignments = query.all() return [assignment.actor_id for assignment in assignments] def create_grant(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): assignment_type = AssignmentType.calculate_type( user_id, group_id, project_id, domain_id) try: with sql.session_for_write() as session: session.add(RoleAssignment( type=assignment_type, actor_id=user_id or group_id, target_id=project_id or domain_id, role_id=role_id, inherited=inherited_to_projects)) except sql.DBDuplicateEntry: # nosec : The v3 grant APIs are silent if # the assignment already exists pass def list_grant_role_ids(self, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): with sql.session_for_read() as session: q = session.query(RoleAssignment.role_id) q = 
q.filter(RoleAssignment.actor_id == (user_id or group_id)) q = q.filter(RoleAssignment.target_id == (project_id or domain_id)) q = q.filter(RoleAssignment.inherited == inherited_to_projects) return [x.role_id for x in q.all()] def _build_grant_filter(self, session, role_id, user_id, group_id, domain_id, project_id, inherited_to_projects): q = session.query(RoleAssignment) q = q.filter_by(actor_id=user_id or group_id) q = q.filter_by(target_id=project_id or domain_id) q = q.filter_by(role_id=role_id) q = q.filter_by(inherited=inherited_to_projects) return q def check_grant_role_id(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): with sql.session_for_read() as session: try: q = self._build_grant_filter( session, role_id, user_id, group_id, domain_id, project_id, inherited_to_projects) q.one() except sql.NotFound: actor_id = user_id or group_id target_id = domain_id or project_id raise exception.RoleAssignmentNotFound(role_id=role_id, actor_id=actor_id, target_id=target_id) def delete_grant(self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False): with sql.session_for_write() as session: q = self._build_grant_filter( session, role_id, user_id, group_id, domain_id, project_id, inherited_to_projects) if not q.delete(False): actor_id = user_id or group_id target_id = domain_id or project_id raise exception.RoleAssignmentNotFound(role_id=role_id, actor_id=actor_id, target_id=target_id) def _list_project_ids_for_actor(self, actors, hints, inherited, group_only=False): # TODO(henry-nash): Now that we have a single assignment table, we # should be able to honor the hints list that is provided. 
assignment_type = [AssignmentType.GROUP_PROJECT] if not group_only: assignment_type.append(AssignmentType.USER_PROJECT) sql_constraints = sqlalchemy.and_( RoleAssignment.type.in_(assignment_type), RoleAssignment.inherited == inherited, RoleAssignment.actor_id.in_(actors)) with sql.session_for_read() as session: query = session.query(RoleAssignment.target_id).filter( sql_constraints).distinct() return [x.target_id for x in query.all()] def list_project_ids_for_user(self, user_id, group_ids, hints, inherited=False): actor_list = [user_id] if group_ids: actor_list = actor_list + group_ids return self._list_project_ids_for_actor(actor_list, hints, inherited) def list_domain_ids_for_user(self, user_id, group_ids, hints, inherited=False): with sql.session_for_read() as session: query = session.query(RoleAssignment.target_id) filters = [] if user_id: sql_constraints = sqlalchemy.and_( RoleAssignment.actor_id == user_id, RoleAssignment.inherited == inherited, RoleAssignment.type == AssignmentType.USER_DOMAIN) filters.append(sql_constraints) if group_ids: sql_constraints = sqlalchemy.and_( RoleAssignment.actor_id.in_(group_ids), RoleAssignment.inherited == inherited, RoleAssignment.type == AssignmentType.GROUP_DOMAIN) filters.append(sql_constraints) if not filters: return [] query = query.filter(sqlalchemy.or_(*filters)).distinct() return [assignment.target_id for assignment in query.all()] def list_role_ids_for_groups_on_domain(self, group_ids, domain_id): if not group_ids: # If there's no groups then there will be no domain roles. 
return [] sql_constraints = sqlalchemy.and_( RoleAssignment.type == AssignmentType.GROUP_DOMAIN, RoleAssignment.target_id == domain_id, RoleAssignment.inherited == false(), RoleAssignment.actor_id.in_(group_ids)) with sql.session_for_read() as session: query = session.query(RoleAssignment.role_id).filter( sql_constraints).distinct() return [role.role_id for role in query.all()] def list_role_ids_for_groups_on_project( self, group_ids, project_id, project_domain_id, project_parents): if not group_ids: # If there's no groups then there will be no project roles. return [] # NOTE(rodrigods): First, we always include projects with # non-inherited assignments sql_constraints = sqlalchemy.and_( RoleAssignment.type == AssignmentType.GROUP_PROJECT, RoleAssignment.inherited == false(), RoleAssignment.target_id == project_id) if CONF.os_inherit.enabled: # Inherited roles from domains sql_constraints = sqlalchemy.or_( sql_constraints, sqlalchemy.and_( RoleAssignment.type == AssignmentType.GROUP_DOMAIN, RoleAssignment.inherited, RoleAssignment.target_id == project_domain_id)) # Inherited roles from projects if project_parents: sql_constraints = sqlalchemy.or_( sql_constraints, sqlalchemy.and_( RoleAssignment.type == AssignmentType.GROUP_PROJECT, RoleAssignment.inherited, RoleAssignment.target_id.in_(project_parents))) sql_constraints = sqlalchemy.and_( sql_constraints, RoleAssignment.actor_id.in_(group_ids)) with sql.session_for_read() as session: # NOTE(morganfainberg): Only select the columns we actually care # about here, in this case role_id. query = session.query(RoleAssignment.role_id).filter( sql_constraints).distinct() return [result.role_id for result in query.all()] def list_project_ids_for_groups(self, group_ids, hints, inherited=False): return self._list_project_ids_for_actor( group_ids, hints, inherited, group_only=True) def list_domain_ids_for_groups(self, group_ids, inherited=False): if not group_ids: # If there's no groups then there will be no domains. 
return [] group_sql_conditions = sqlalchemy.and_( RoleAssignment.type == AssignmentType.GROUP_DOMAIN, RoleAssignment.inherited == inherited, RoleAssignment.actor_id.in_(group_ids)) with sql.session_for_read() as session: query = session.query(RoleAssignment.target_id).filter( group_sql_conditions).distinct() return [x.target_id for x in query.all()] def add_role_to_user_and_project(self, user_id, tenant_id, role_id): try: with sql.session_for_write() as session: session.add(RoleAssignment( type=AssignmentType.USER_PROJECT, actor_id=user_id, target_id=tenant_id, role_id=role_id, inherited=False)) except sql.DBDuplicateEntry: msg = ('User %s already has role %s in tenant %s' % (user_id, role_id, tenant_id)) raise exception.Conflict(type='role grant', details=msg) def remove_role_from_user_and_project(self, user_id, tenant_id, role_id): with sql.session_for_write() as session: q = session.query(RoleAssignment) q = q.filter_by(actor_id=user_id) q = q.filter_by(target_id=tenant_id) q = q.filter_by(role_id=role_id) if q.delete() == 0: raise exception.RoleNotFound(message=_( 'Cannot remove role that has not been granted, %s') % role_id) def _get_user_assignment_types(self): return [AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN] def _get_group_assignment_types(self): return [AssignmentType.GROUP_PROJECT, AssignmentType.GROUP_DOMAIN] def _get_project_assignment_types(self): return [AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT] def _get_domain_assignment_types(self): return [AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN] def _get_assignment_types(self, user, group, project, domain): """Returns a list of role assignment types based on provided entities If one of user or group (the "actor") as well as one of project or domain (the "target") are provided, the list will contain the role assignment type for that specific pair of actor and target. 
If only an actor or target is provided, the list will contain the role assignment types that satisfy the specified entity. For example, if user and project are provided, the return will be: [AssignmentType.USER_PROJECT] However, if only user was provided, the return would be: [AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN] It is not expected that user and group (or project and domain) are specified - but if they are, the most fine-grained value will be chosen (i.e. user over group, project over domain). """ actor_types = [] if user: actor_types = self._get_user_assignment_types() elif group: actor_types = self._get_group_assignment_types() target_types = [] if project: target_types = self._get_project_assignment_types() elif domain: target_types = self._get_domain_assignment_types() if actor_types and target_types: return list(set(actor_types).intersection(target_types)) return actor_types or target_types def list_role_assignments(self, role_id=None, user_id=None, group_ids=None, domain_id=None, project_ids=None, inherited_to_projects=None): def denormalize_role(ref): assignment = {} if ref.type == AssignmentType.USER_PROJECT: assignment['user_id'] = ref.actor_id assignment['project_id'] = ref.target_id elif ref.type == AssignmentType.USER_DOMAIN: assignment['user_id'] = ref.actor_id assignment['domain_id'] = ref.target_id elif ref.type == AssignmentType.GROUP_PROJECT: assignment['group_id'] = ref.actor_id assignment['project_id'] = ref.target_id elif ref.type == AssignmentType.GROUP_DOMAIN: assignment['group_id'] = ref.actor_id assignment['domain_id'] = ref.target_id else: raise exception.Error(message=_( 'Unexpected assignment type encountered, %s') % ref.type) assignment['role_id'] = ref.role_id if ref.inherited: assignment['inherited_to_projects'] = 'projects' return assignment with sql.session_for_read() as session: assignment_types = self._get_assignment_types( user_id, group_ids, project_ids, domain_id) targets = None if project_ids: targets = 
project_ids elif domain_id: targets = [domain_id] actors = None if group_ids: actors = group_ids elif user_id: actors = [user_id] query = session.query(RoleAssignment) if role_id: query = query.filter_by(role_id=role_id) if actors: query = query.filter(RoleAssignment.actor_id.in_(actors)) if targets: query = query.filter(RoleAssignment.target_id.in_(targets)) if assignment_types: query = query.filter(RoleAssignment.type.in_(assignment_types)) if inherited_to_projects is not None: query = query.filter_by(inherited=inherited_to_projects) return [denormalize_role(ref) for ref in query.all()] def delete_project_assignments(self, project_id): with sql.session_for_write() as session: q = session.query(RoleAssignment) q = q.filter_by(target_id=project_id) q.delete(False) def delete_role_assignments(self, role_id): with sql.session_for_write() as session: q = session.query(RoleAssignment) q = q.filter_by(role_id=role_id) q.delete(False) def delete_user_assignments(self, user_id): with sql.session_for_write() as session: q = session.query(RoleAssignment) q = q.filter_by(actor_id=user_id) q.delete(False) def delete_group_assignments(self, group_id): with sql.session_for_write() as session: q = session.query(RoleAssignment) q = q.filter_by(actor_id=group_id) q.delete(False) class RoleAssignment(sql.ModelBase, sql.DictBase): __tablename__ = 'assignment' attributes = ['type', 'actor_id', 'target_id', 'role_id', 'inherited'] # NOTE(henry-nash); Postgres requires a name to be defined for an Enum type = sql.Column( sql.Enum(AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT, AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN, name='type'), nullable=False) actor_id = sql.Column(sql.String(64), nullable=False) target_id = sql.Column(sql.String(64), nullable=False) role_id = sql.Column(sql.String(64), nullable=False) inherited = sql.Column(sql.Boolean, default=False, nullable=False) __table_args__ = ( sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id', 
'inherited'), sql.Index('ix_actor_id', 'actor_id'), ) def to_dict(self): """Override parent method with a simpler implementation. RoleAssignment doesn't have non-indexed 'extra' attributes, so the parent implementation is not applicable. """ return dict(self.items()) keystone-9.0.0/keystone/resource/0000775000567000056710000000000012701407246020215 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/resource/backends/0000775000567000056710000000000012701407246021767 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/resource/backends/__init__.py0000664000567000056710000000000012701407102024055 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/resource/backends/sql.py0000664000567000056710000002650412701407102023136 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from keystone.common import clean from keystone.common import driver_hints from keystone.common import sql from keystone import exception from keystone.i18n import _LE, _LW from keystone import resource as keystone_resource LOG = log.getLogger(__name__) class Resource(keystone_resource.ResourceDriverV9): def default_assignment_driver(self): return 'sql' def _encode_domain_id(self, ref): if 'domain_id' in ref and ref['domain_id'] is None: new_ref = ref.copy() new_ref['domain_id'] = keystone_resource.NULL_DOMAIN_ID return new_ref else: return ref def _is_hidden_ref(self, ref): return ref.id == keystone_resource.NULL_DOMAIN_ID def _get_project(self, session, project_id): project_ref = session.query(Project).get(project_id) if project_ref is None or self._is_hidden_ref(project_ref): raise exception.ProjectNotFound(project_id=project_id) return project_ref def get_project(self, project_id): with sql.session_for_read() as session: return self._get_project(session, project_id).to_dict() def get_project_by_name(self, project_name, domain_id): with sql.session_for_read() as session: query = session.query(Project) query = query.filter_by(name=project_name) if domain_id is None: query = query.filter_by( domain_id=keystone_resource.NULL_DOMAIN_ID) else: query = query.filter_by(domain_id=domain_id) try: project_ref = query.one() except sql.NotFound: raise exception.ProjectNotFound(project_id=project_name) if self._is_hidden_ref(project_ref): raise exception.ProjectNotFound(project_id=project_name) return project_ref.to_dict() @driver_hints.truncated def list_projects(self, hints): # If there is a filter on domain_id and the value is None, then to # ensure that the sql filtering works correctly, we need to patch # the value to be NULL_DOMAIN_ID. 
This is safe to do here since we # know we are able to satisfy any filter of this type in the call to # filter_limit_query() below, which will remove the filter from the # hints (hence ensuring our substitution is not exposed to the caller). for f in hints.filters: if (f['name'] == 'domain_id' and f['value'] is None): f['value'] = keystone_resource.NULL_DOMAIN_ID with sql.session_for_read() as session: query = session.query(Project) project_refs = sql.filter_limit_query(Project, query, hints) return [project_ref.to_dict() for project_ref in project_refs if not self._is_hidden_ref(project_ref)] def list_projects_from_ids(self, ids): if not ids: return [] else: with sql.session_for_read() as session: query = session.query(Project) query = query.filter(Project.id.in_(ids)) return [project_ref.to_dict() for project_ref in query.all() if not self._is_hidden_ref(project_ref)] def list_project_ids_from_domain_ids(self, domain_ids): if not domain_ids: return [] else: with sql.session_for_read() as session: query = session.query(Project.id) query = ( query.filter(Project.domain_id.in_(domain_ids))) return [x.id for x in query.all() if not self._is_hidden_ref(x)] def list_projects_in_domain(self, domain_id): with sql.session_for_read() as session: try: self._get_project(session, domain_id) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_id) query = session.query(Project) project_refs = query.filter(Project.domain_id == domain_id) return [project_ref.to_dict() for project_ref in project_refs] def list_projects_acting_as_domain(self, hints): hints.add_filter('is_domain', True) return self.list_projects(hints) def _get_children(self, session, project_ids, domain_id=None): query = session.query(Project) query = query.filter(Project.parent_id.in_(project_ids)) project_refs = query.all() return [project_ref.to_dict() for project_ref in project_refs] def list_projects_in_subtree(self, project_id): with sql.session_for_read() as session: children = 
self._get_children(session, [project_id]) subtree = [] examined = set([project_id]) while children: children_ids = set() for ref in children: if ref['id'] in examined: msg = _LE('Circular reference or a repeated ' 'entry found in projects hierarchy - ' '%(project_id)s.') LOG.error(msg, {'project_id': ref['id']}) return children_ids.add(ref['id']) examined.update(children_ids) subtree += children children = self._get_children(session, children_ids) return subtree def list_project_parents(self, project_id): with sql.session_for_read() as session: project = self._get_project(session, project_id).to_dict() parents = [] examined = set() while project.get('parent_id') is not None: if project['id'] in examined: msg = _LE('Circular reference or a repeated ' 'entry found in projects hierarchy - ' '%(project_id)s.') LOG.error(msg, {'project_id': project['id']}) return examined.add(project['id']) parent_project = self._get_project( session, project['parent_id']).to_dict() parents.append(parent_project) project = parent_project return parents def is_leaf_project(self, project_id): with sql.session_for_read() as session: project_refs = self._get_children(session, [project_id]) return not project_refs # CRUD @sql.handle_conflicts(conflict_type='project') def create_project(self, project_id, project): project['name'] = clean.project_name(project['name']) new_project = self._encode_domain_id(project) with sql.session_for_write() as session: project_ref = Project.from_dict(new_project) session.add(project_ref) return project_ref.to_dict() @sql.handle_conflicts(conflict_type='project') def update_project(self, project_id, project): if 'name' in project: project['name'] = clean.project_name(project['name']) update_project = self._encode_domain_id(project) with sql.session_for_write() as session: project_ref = self._get_project(session, project_id) old_project_dict = project_ref.to_dict() for k in update_project: old_project_dict[k] = update_project[k] # When we read the 
old_project_dict, any "null" domain_id will have # been decoded, so we need to re-encode it old_project_dict = self._encode_domain_id(old_project_dict) new_project = Project.from_dict(old_project_dict) for attr in Project.attributes: if attr != 'id': setattr(project_ref, attr, getattr(new_project, attr)) project_ref.extra = new_project.extra return project_ref.to_dict(include_extra_dict=True) @sql.handle_conflicts(conflict_type='project') def delete_project(self, project_id): with sql.session_for_write() as session: project_ref = self._get_project(session, project_id) session.delete(project_ref) @sql.handle_conflicts(conflict_type='project') def delete_projects_from_ids(self, project_ids): if not project_ids: return with sql.session_for_write() as session: query = session.query(Project).filter(Project.id.in_( project_ids)) project_ids_from_bd = [p['id'] for p in query.all()] for project_id in project_ids: if (project_id not in project_ids_from_bd or project_id == keystone_resource.NULL_DOMAIN_ID): LOG.warning(_LW('Project %s does not exist and was not ' 'deleted.') % project_id) query.delete(synchronize_session=False) class Domain(sql.ModelBase, sql.DictBase): __tablename__ = 'domain' attributes = ['id', 'name', 'enabled'] id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(64), nullable=False) enabled = sql.Column(sql.Boolean, default=True, nullable=False) extra = sql.Column(sql.JsonBlob()) __table_args__ = (sql.UniqueConstraint('name'),) class Project(sql.ModelBase, sql.DictBase): # NOTE(henry-nash): From the manager and above perspective, the domain_id # is nullable. However, to ensure uniqueness in multi-process # configurations, it is better to still use the sql uniqueness constraint. # Since the support for a nullable component of a uniqueness constraint # across different sql databases is mixed, we instead store a special value # to represent null, as defined in NULL_DOMAIN_ID above. 
def to_dict(self, include_extra_dict=False): d = super(Project, self).to_dict( include_extra_dict=include_extra_dict) if d['domain_id'] == keystone_resource.NULL_DOMAIN_ID: d['domain_id'] = None return d __tablename__ = 'project' attributes = ['id', 'name', 'domain_id', 'description', 'enabled', 'parent_id', 'is_domain'] id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(64), nullable=False) domain_id = sql.Column(sql.String(64), sql.ForeignKey('project.id'), nullable=False) description = sql.Column(sql.Text()) enabled = sql.Column(sql.Boolean) extra = sql.Column(sql.JsonBlob()) parent_id = sql.Column(sql.String(64), sql.ForeignKey('project.id')) is_domain = sql.Column(sql.Boolean, default=False, nullable=False, server_default='0') # Unique constraint across two columns to create the separation # rather than just only 'name' being unique __table_args__ = (sql.UniqueConstraint('domain_id', 'name'),) keystone-9.0.0/keystone/resource/schema.py0000664000567000056710000000450112701407105022021 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import validation from keystone.common.validation import parameter_types _project_properties = { 'description': validation.nullable(parameter_types.description), # NOTE(htruta): domain_id is nullable for projects acting as a domain. 
'domain_id': validation.nullable(parameter_types.id_string), 'enabled': parameter_types.boolean, 'is_domain': parameter_types.boolean, 'parent_id': validation.nullable(parameter_types.id_string), 'name': { 'type': 'string', 'minLength': 1, 'maxLength': 64 } } project_create = { 'type': 'object', 'properties': _project_properties, # NOTE(lbragstad): A project name is the only parameter required for # project creation according to the Identity V3 API. We should think # about using the maxProperties validator here, and in update. 'required': ['name'], 'additionalProperties': True } project_update = { 'type': 'object', 'properties': _project_properties, # NOTE(lbragstad): Make sure at least one property is being updated 'minProperties': 1, 'additionalProperties': True } _domain_properties = { 'description': validation.nullable(parameter_types.description), 'enabled': parameter_types.boolean, 'name': { 'type': 'string', 'minLength': 1, 'maxLength': 64 } } domain_create = { 'type': 'object', 'properties': _domain_properties, # TODO(lbragstad): According to the V3 API spec, name isn't required but # the current implementation in assignment.controller:DomainV3 requires a # name for the domain. 'required': ['name'], 'additionalProperties': True } domain_update = { 'type': 'object', 'properties': _domain_properties, 'minProperties': 1, 'additionalProperties': True } keystone-9.0.0/keystone/resource/__init__.py0000664000567000056710000000120112701407102022307 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from keystone.resource import controllers # noqa from keystone.resource.core import * # noqa keystone-9.0.0/keystone/resource/core.py0000664000567000056710000026270712701407105021527 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Resource service.""" import abc import copy from oslo_config import cfg from oslo_log import log from oslo_log import versionutils import six from keystone import assignment from keystone.common import cache from keystone.common import clean from keystone.common import dependency from keystone.common import driver_hints from keystone.common import manager from keystone.common import utils from keystone import exception from keystone.i18n import _, _LE, _LW from keystone import notifications CONF = cfg.CONF LOG = log.getLogger(__name__) MEMOIZE = cache.get_memoization_decorator(group='resource') def calc_default_domain(): return {'description': (u'The default domain'), 'enabled': True, 'id': CONF.identity.default_domain_id, 'name': u'Default'} def _get_project_from_domain(domain_ref): """Creates a project ref from the provided domain ref.""" project_ref = domain_ref.copy() project_ref['is_domain'] = True project_ref['domain_id'] = None project_ref['parent_id'] = None return project_ref @dependency.provider('resource_api') @dependency.requires('assignment_api', 'credential_api', 
'domain_config_api', 'identity_api', 'revoke_api') class Manager(manager.Manager): """Default pivot point for the Resource backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.resource' _DOMAIN = 'domain' _PROJECT = 'project' def __init__(self): # If there is a specific driver specified for resource, then use it. # Otherwise retrieve the driver type from the assignment driver. resource_driver = CONF.resource.driver if resource_driver is None: assignment_manager = dependency.get_provider('assignment_api') resource_driver = assignment_manager.default_resource_driver() super(Manager, self).__init__(resource_driver) # Make sure it is a driver version we support, and if it is a legacy # driver, then wrap it. if isinstance(self.driver, ResourceDriverV8): self.driver = V9ResourceWrapperForV8Driver(self.driver) elif not isinstance(self.driver, ResourceDriverV9): raise exception.UnsupportedDriverVersion(driver=resource_driver) def _get_hierarchy_depth(self, parents_list): return len(parents_list) + 1 def _assert_max_hierarchy_depth(self, project_id, parents_list=None): if parents_list is None: parents_list = self.list_project_parents(project_id) # NOTE(henry-nash): In upgrading to a scenario where domains are # represented as projects acting as domains, we will effectively # increase the depth of any existing project hierarchy by one. To avoid # pushing any existing hierarchies over the limit, we add one to the # maximum depth allowed, as specified in the configuration file. 
max_depth = CONF.max_project_tree_depth + 1 if self._get_hierarchy_depth(parents_list) > max_depth: raise exception.ForbiddenNotSecurity( _('Max hierarchy depth reached for %s branch.') % project_id) def _assert_is_domain_project_constraints(self, project_ref): """Enforces specific constraints of projects that act as domains Called when is_domain is true, this method ensures that: * multiple domains are enabled * the project name is not the reserved name for a federated domain * the project is a root project :raises keystone.exception.ValidationError: If one of the constraints was not satisfied. """ if (not self.identity_api.multiple_domains_supported and project_ref['id'] != CONF.identity.default_domain_id): raise exception.ValidationError( message=_('Multiple domains are not supported')) self.assert_domain_not_federated(project_ref['id'], project_ref) if project_ref['parent_id']: raise exception.ValidationError( message=_('only root projects are allowed to act as ' 'domains.')) def _assert_regular_project_constraints(self, project_ref): """Enforces regular project hierarchy constraints Called when is_domain is false. The project must contain a valid domain_id and parent_id. The goal of this method is to check that the domain_id specified is consistent with the domain of its parent. :raises keystone.exception.ValidationError: If one of the constraints was not satisfied. :raises keystone.exception.DomainNotFound: In case the domain is not found. """ # Ensure domain_id is valid, and by inference will not be None. 
domain = self.get_domain(project_ref['domain_id']) parent_ref = self.get_project(project_ref['parent_id']) if parent_ref['is_domain']: if parent_ref['id'] != domain['id']: raise exception.ValidationError( message=_('Cannot create project, since its parent ' '(%(domain_id)s) is acting as a domain, ' 'but project\'s specified parent_id ' '(%(parent_id)s) does not match ' 'this domain_id.') % {'domain_id': domain['id'], 'parent_id': parent_ref['id']}) else: parent_domain_id = parent_ref.get('domain_id') if parent_domain_id != domain['id']: raise exception.ValidationError( message=_('Cannot create project, since it specifies ' 'its owner as domain %(domain_id)s, but ' 'specifies a parent in a different domain ' '(%(parent_domain_id)s).') % {'domain_id': domain['id'], 'parent_domain_id': parent_domain_id}) def _enforce_project_constraints(self, project_ref): if project_ref.get('is_domain'): self._assert_is_domain_project_constraints(project_ref) else: self._assert_regular_project_constraints(project_ref) # The whole hierarchy (upwards) must be enabled parent_id = project_ref['parent_id'] parents_list = self.list_project_parents(parent_id) parent_ref = self.get_project(parent_id) parents_list.append(parent_ref) for ref in parents_list: if not ref.get('enabled', True): raise exception.ValidationError( message=_('cannot create a project in a ' 'branch containing a disabled ' 'project: %s') % ref['id']) self._assert_max_hierarchy_depth(project_ref.get('parent_id'), parents_list) def _raise_reserved_character_exception(self, entity_type, name): msg = _('%(entity)s name cannot contain the following reserved ' 'characters: %(chars)s') raise exception.ValidationError( message=msg % { 'entity': entity_type, 'chars': utils.list_url_unsafe_chars(name) }) def _generate_project_name_conflict_msg(self, project): if project['is_domain']: return _('it is not permitted to have two projects ' 'acting as domains with the same name: %s' ) % project['name'] else: return _('it is not 
permitted to have two projects ' 'within a domain with the same name : %s' ) % project['name'] def create_project(self, project_id, project, initiator=None): project = project.copy() if (CONF.resource.project_name_url_safe != 'off' and utils.is_not_url_safe(project['name'])): self._raise_reserved_character_exception('Project', project['name']) project.setdefault('enabled', True) project['enabled'] = clean.project_enabled(project['enabled']) project.setdefault('description', '') # For regular projects, the controller will ensure we have a valid # domain_id. For projects acting as a domain, the project_id # is, effectively, the domain_id - and for such projects we don't # bother to store a copy of it in the domain_id attribute. project.setdefault('domain_id', None) project.setdefault('parent_id', None) if not project['parent_id']: project['parent_id'] = project['domain_id'] project.setdefault('is_domain', False) self._enforce_project_constraints(project) # We leave enforcing name uniqueness to the underlying driver (instead # of doing it in code in the project_constraints above), so as to allow # this check to be done at the storage level, avoiding race conditions # in multi-process keystone configurations. try: ret = self.driver.create_project(project_id, project) except exception.Conflict: raise exception.Conflict( type='project', details=self._generate_project_name_conflict_msg(project)) if project.get('is_domain'): notifications.Audit.created(self._DOMAIN, project_id, initiator) else: notifications.Audit.created(self._PROJECT, project_id, initiator) if MEMOIZE.should_cache(ret): self.get_project.set(ret, self, project_id) self.get_project_by_name.set(ret, self, ret['name'], ret['domain_id']) return ret def assert_domain_enabled(self, domain_id, domain=None): """Assert the Domain is enabled. :raise AssertionError: if domain is disabled. 
""" if domain is None: domain = self.get_domain(domain_id) if not domain.get('enabled', True): raise AssertionError(_('Domain is disabled: %s') % domain_id) def assert_domain_not_federated(self, domain_id, domain): """Assert the Domain's name and id do not match the reserved keyword. Note that the reserved keyword is defined in the configuration file, by default, it is 'Federated', it is also case insensitive. If config's option is empty the default hardcoded value 'Federated' will be used. :raise AssertionError: if domain named match the value in the config. """ # NOTE(marek-denis): We cannot create this attribute in the __init__ as # config values are always initialized to default value. federated_domain = CONF.federation.federated_domain_name.lower() if (domain.get('name') and domain['name'].lower() == federated_domain): raise AssertionError(_('Domain cannot be named %s') % domain['name']) if (domain_id.lower() == federated_domain): raise AssertionError(_('Domain cannot have ID %s') % domain_id) def assert_project_enabled(self, project_id, project=None): """Assert the project is enabled and its associated domain is enabled. :raise AssertionError: if the project or domain is disabled. """ if project is None: project = self.get_project(project_id) # If it's a regular project (i.e. 
it has a domain_id), we need to make # sure the domain itself is not disabled if project['domain_id']: self.assert_domain_enabled(domain_id=project['domain_id']) if not project.get('enabled', True): raise AssertionError(_('Project is disabled: %s') % project_id) def _assert_all_parents_are_enabled(self, project_id): parents_list = self.list_project_parents(project_id) for project in parents_list: if not project.get('enabled', True): raise exception.ForbiddenNotSecurity( _('Cannot enable project %s since it has disabled ' 'parents') % project_id) def _check_whole_subtree_is_disabled(self, project_id, subtree_list=None): if not subtree_list: subtree_list = self.list_projects_in_subtree(project_id) subtree_enabled = [ref.get('enabled', True) for ref in subtree_list] return (not any(subtree_enabled)) def _update_project(self, project_id, project, initiator=None, cascade=False): # Use the driver directly to prevent using old cached value. original_project = self.driver.get_project(project_id) project = project.copy() if original_project['is_domain']: domain = self._get_domain_from_project(original_project) self.assert_domain_not_federated(project_id, domain) if 'enabled' in domain: domain['enabled'] = clean.domain_enabled(domain['enabled']) url_safe_option = CONF.resource.domain_name_url_safe exception_entity = 'Domain' else: url_safe_option = CONF.resource.project_name_url_safe exception_entity = 'Project' if (url_safe_option != 'off' and 'name' in project and project['name'] != original_project['name'] and utils.is_not_url_safe(project['name'])): self._raise_reserved_character_exception(exception_entity, project['name']) parent_id = original_project.get('parent_id') if 'parent_id' in project and project.get('parent_id') != parent_id: raise exception.ForbiddenNotSecurity( _('Update of `parent_id` is not allowed.')) if ('is_domain' in project and project['is_domain'] != original_project['is_domain']): raise exception.ValidationError( message=_('Update of `is_domain` is 
not allowed.')) update_domain = ('domain_id' in project and project['domain_id'] != original_project['domain_id']) # NOTE(htruta): Even if we are allowing domain_ids to be # modified (i.e. 'domain_id_immutable' is set False), # a project.domain_id can only be updated for root projects # that have no children. The update of domain_id of a project in # the middle of the hierarchy creates an inconsistent project # hierarchy. if update_domain: if original_project['is_domain']: raise exception.ValidationError( message=_('Update of domain_id of projects acting as ' 'domains is not allowed.')) parent_project = ( self.driver.get_project(original_project['parent_id'])) is_root_project = parent_project['is_domain'] if not is_root_project: raise exception.ValidationError( message=_('Update of domain_id is only allowed for ' 'root projects.')) subtree_list = self.list_projects_in_subtree(project_id) if subtree_list: raise exception.ValidationError( message=_('Cannot update domain_id of a project that ' 'has children.')) versionutils.report_deprecated_feature( LOG, _('update of domain_id is deprecated as of Mitaka ' 'and will be removed in O.') ) if 'enabled' in project: project['enabled'] = clean.project_enabled(project['enabled']) original_project_enabled = original_project.get('enabled', True) project_enabled = project.get('enabled', True) if not original_project_enabled and project_enabled: self._assert_all_parents_are_enabled(project_id) if original_project_enabled and not project_enabled: # NOTE(htruta): In order to disable a regular project, all its # children must already be disabled. However, to keep # compatibility with the existing domain behaviour, we allow a # project acting as a domain to be disabled irrespective of the # state of its children. Disabling a project acting as domain # effectively disables its children. 
if (not original_project.get('is_domain') and not cascade and not self._check_whole_subtree_is_disabled(project_id)): raise exception.ForbiddenNotSecurity( _('Cannot disable project %(project_id)s since its ' 'subtree contains enabled projects.') % {'project_id': project_id}) notifications.Audit.disabled(self._PROJECT, project_id, public=False) if cascade: self._only_allow_enabled_to_update_cascade(project, original_project) self._update_project_enabled_cascade(project_id, project_enabled) try: ret = self.driver.update_project(project_id, project) except exception.Conflict: raise exception.Conflict( type='project', details=self._generate_project_name_conflict_msg(project)) notifications.Audit.updated(self._PROJECT, project_id, initiator) if original_project['is_domain']: notifications.Audit.updated(self._DOMAIN, project_id, initiator) # If the domain is being disabled, issue the disable notification # as well if original_project_enabled and not project_enabled: notifications.Audit.disabled(self._DOMAIN, project_id, public=False) self.get_project.invalidate(self, project_id) self.get_project_by_name.invalidate(self, original_project['name'], original_project['domain_id']) if ('domain_id' in project and project['domain_id'] != original_project['domain_id']): # If the project's domain_id has been updated, invalidate user # role assignments cache region, as it may be caching inherited # assignments from the old domain to the specified project assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() return ret def _only_allow_enabled_to_update_cascade(self, project, original_project): for attr in project: if attr != 'enabled': if project.get(attr) != original_project.get(attr): raise exception.ValidationError( message=_('Cascade update is only allowed for ' 'enabled attribute.')) def _update_project_enabled_cascade(self, project_id, enabled): subtree = self.list_projects_in_subtree(project_id) # Update enabled only if different from original value subtree_to_update = [child 
for child in subtree if child['enabled'] != enabled] for child in subtree_to_update: child['enabled'] = enabled if not enabled: # Does not in fact disable the project, only emits a # notification that it was disabled. The actual disablement # is done in the next line. notifications.Audit.disabled(self._PROJECT, child['id'], public=False) self.driver.update_project(child['id'], child) def update_project(self, project_id, project, initiator=None, cascade=False): ret = self._update_project(project_id, project, initiator, cascade) if ret['is_domain']: self.get_domain.invalidate(self, project_id) self.get_domain_by_name.invalidate(self, ret['name']) return ret def _pre_delete_cleanup_project(self, project_id, project, initiator=None): project_user_ids = ( self.assignment_api.list_user_ids_for_project(project_id)) for user_id in project_user_ids: payload = {'user_id': user_id, 'project_id': project_id} notifications.Audit.internal( notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE, payload ) def _post_delete_cleanup_project(self, project_id, project, initiator=None): self.assignment_api.delete_project_assignments(project_id) self.get_project.invalidate(self, project_id) self.get_project_by_name.invalidate(self, project['name'], project['domain_id']) self.credential_api.delete_credentials_for_project(project_id) notifications.Audit.deleted(self._PROJECT, project_id, initiator) # Invalidate user role assignments cache region, as it may # be caching role assignments where the target is # the specified project assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() def delete_project(self, project_id, initiator=None, cascade=False): project = self.driver.get_project(project_id) if project.get('is_domain'): self.delete_domain(project_id, initiator) else: self._delete_project(project_id, initiator, cascade) def _delete_project(self, project_id, initiator=None, cascade=False): # Use the driver directly to prevent using old cached value. 
project = self.driver.get_project(project_id) if project['is_domain'] and project['enabled']: raise exception.ValidationError( message=_('cannot delete an enabled project acting as a ' 'domain. Please disable the project %s first.') % project.get('id')) if not self.is_leaf_project(project_id) and not cascade: raise exception.ForbiddenNotSecurity( _('Cannot delete the project %s since it is not a leaf in the ' 'hierarchy. Use the cascade option if you want to delete a ' 'whole subtree.') % project_id) if cascade: # Getting reversed project's subtrees list, i.e. from the leaves # to the root, so we do not break parent_id FK. subtree_list = self.list_projects_in_subtree(project_id) subtree_list.reverse() if not self._check_whole_subtree_is_disabled( project_id, subtree_list=subtree_list): raise exception.ForbiddenNotSecurity( _('Cannot delete project %(project_id)s since its subtree ' 'contains enabled projects.') % {'project_id': project_id}) project_list = subtree_list + [project] projects_ids = [x['id'] for x in project_list] for prj in project_list: self._pre_delete_cleanup_project(prj['id'], prj, initiator) ret = self.driver.delete_projects_from_ids(projects_ids) for prj in project_list: self._post_delete_cleanup_project(prj['id'], prj, initiator) else: self._pre_delete_cleanup_project(project_id, project, initiator) ret = self.driver.delete_project(project_id) self._post_delete_cleanup_project(project_id, project, initiator) return ret def _filter_projects_list(self, projects_list, user_id): user_projects = self.assignment_api.list_projects_for_user(user_id) user_projects_ids = set([proj['id'] for proj in user_projects]) # Keep only the projects present in user_projects return [proj for proj in projects_list if proj['id'] in user_projects_ids] def _assert_valid_project_id(self, project_id): if project_id is None: msg = _('Project field is required and cannot be empty.') raise exception.ValidationError(message=msg) # Check if project_id exists 
self.get_project(project_id) def list_project_parents(self, project_id, user_id=None): self._assert_valid_project_id(project_id) parents = self.driver.list_project_parents(project_id) # If a user_id was provided, the returned list should be filtered # against the projects this user has access to. if user_id: parents = self._filter_projects_list(parents, user_id) return parents def _build_parents_as_ids_dict(self, project, parents_by_id): # NOTE(rodrigods): we don't rely in the order of the projects returned # by the list_project_parents() method. Thus, we create a project cache # (parents_by_id) in order to access each parent in constant time and # traverse up the hierarchy. def traverse_parents_hierarchy(project): parent_id = project.get('parent_id') if not parent_id: return None parent = parents_by_id[parent_id] return {parent_id: traverse_parents_hierarchy(parent)} return traverse_parents_hierarchy(project) def get_project_parents_as_ids(self, project): """Gets the IDs from the parents from a given project. The project IDs are returned as a structured dictionary traversing up the hierarchy to the top level project. For example, considering the following project hierarchy:: A | +-B-+ | | C D If we query for project C parents, the expected return is the following dictionary:: 'parents': { B['id']: { A['id']: None } } """ parents_list = self.list_project_parents(project['id']) parents_as_ids = self._build_parents_as_ids_dict( project, {proj['id']: proj for proj in parents_list}) return parents_as_ids def list_projects_in_subtree(self, project_id, user_id=None): self._assert_valid_project_id(project_id) subtree = self.driver.list_projects_in_subtree(project_id) # If a user_id was provided, the returned list should be filtered # against the projects this user has access to. 
if user_id: subtree = self._filter_projects_list(subtree, user_id) return subtree def _build_subtree_as_ids_dict(self, project_id, subtree_by_parent): # NOTE(rodrigods): we perform a depth first search to construct the # dictionaries representing each level of the subtree hierarchy. In # order to improve this traversal performance, we create a cache of # projects (subtree_py_parent) that accesses in constant time the # direct children of a given project. def traverse_subtree_hierarchy(project_id): children = subtree_by_parent.get(project_id) if not children: return None children_ids = {} for child in children: children_ids[child['id']] = traverse_subtree_hierarchy( child['id']) return children_ids return traverse_subtree_hierarchy(project_id) def get_projects_in_subtree_as_ids(self, project_id): """Gets the IDs from the projects in the subtree from a given project. The project IDs are returned as a structured dictionary representing their hierarchy. For example, considering the following project hierarchy:: A | +-B-+ | | C D If we query for project A subtree, the expected return is the following dictionary:: 'subtree': { B['id']: { C['id']: None, D['id']: None } } """ def _projects_indexed_by_parent(projects_list): projects_by_parent = {} for proj in projects_list: parent_id = proj.get('parent_id') if parent_id: if parent_id in projects_by_parent: projects_by_parent[parent_id].append(proj) else: projects_by_parent[parent_id] = [proj] return projects_by_parent subtree_list = self.list_projects_in_subtree(project_id) subtree_as_ids = self._build_subtree_as_ids_dict( project_id, _projects_indexed_by_parent(subtree_list)) return subtree_as_ids def list_domains_from_ids(self, domain_ids): """List domains for the provided list of ids. :param domain_ids: list of ids :returns: a list of domain_refs. This method is used internally by the assignment manager to bulk read a set of domains given their ids. 
""" # Retrieve the projects acting as domains get their correspondent # domains projects = self.list_projects_from_ids(domain_ids) domains = [self._get_domain_from_project(project) for project in projects] return domains @MEMOIZE def get_domain(self, domain_id): try: # Retrieve the corresponding project that acts as a domain project = self.driver.get_project(domain_id) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_id) # Return its correspondent domain return self._get_domain_from_project(project) @MEMOIZE def get_domain_by_name(self, domain_name): try: # Retrieve the corresponding project that acts as a domain project = self.driver.get_project_by_name(domain_name, domain_id=None) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_name) # Return its correspondent domain return self._get_domain_from_project(project) def _get_domain_from_project(self, project_ref): """Creates a domain ref from a project ref. Based on the provided project ref, create a domain ref, so that the result can be returned in response to a domain API call. """ if not project_ref['is_domain']: LOG.error(_LE('Asked to convert a non-domain project into a ' 'domain - Domain: %(domain_id)s, Project ID: ' '%(id)s, Project Name: %(project_name)s'), {'domain_id': project_ref['domain_id'], 'id': project_ref['id'], 'project_name': project_ref['name']}) raise exception.DomainNotFound(domain_id=project_ref['id']) domain_ref = project_ref.copy() # As well as the project specific attributes that we need to remove, # there is an old compatibility issue in that update project (as well # as extracting an extra attributes), also includes a copy of the # actual extra dict as well - something that update domain does not do. 
for k in ['parent_id', 'domain_id', 'is_domain', 'extra']: domain_ref.pop(k, None) return domain_ref def create_domain(self, domain_id, domain, initiator=None): if (CONF.resource.domain_name_url_safe != 'off' and utils.is_not_url_safe(domain['name'])): self._raise_reserved_character_exception('Domain', domain['name']) project_from_domain = _get_project_from_domain(domain) is_domain_project = self.create_project( domain_id, project_from_domain, initiator) return self._get_domain_from_project(is_domain_project) @manager.response_truncated def list_domains(self, hints=None): projects = self.list_projects_acting_as_domain(hints) domains = [self._get_domain_from_project(project) for project in projects] return domains def update_domain(self, domain_id, domain, initiator=None): # TODO(henry-nash): We shouldn't have to check for the federated domain # here as well as _update_project, but currently our tests assume the # checks are done in a specific order. The tests should be refactored. self.assert_domain_not_federated(domain_id, domain) project = _get_project_from_domain(domain) try: original_domain = self.driver.get_project(domain_id) project = self._update_project(domain_id, project, initiator) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_id) domain_from_project = self._get_domain_from_project(project) self.get_domain.invalidate(self, domain_id) self.get_domain_by_name.invalidate(self, original_domain['name']) return domain_from_project def delete_domain(self, domain_id, initiator=None): # Use the driver directly to get the project that acts as a domain and # prevent using old cached value. try: domain = self.driver.get_project(domain_id) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_id) # To help avoid inadvertent deletes, we insist that the domain # has been previously disabled. 
This also prevents a user deleting # their own domain since, once it is disabled, they won't be able # to get a valid token to issue this delete. if domain['enabled']: raise exception.ForbiddenNotSecurity( _('Cannot delete a domain that is enabled, please disable it ' 'first.')) self._delete_domain_contents(domain_id) self._delete_project(domain_id, initiator) # Delete any database stored domain config self.domain_config_api.delete_config_options(domain_id) self.domain_config_api.delete_config_options(domain_id, sensitive=True) self.domain_config_api.release_registration(domain_id) # TODO(henry-nash): Although the controller will ensure deletion of # all users & groups within the domain (which will cause all # assignments for those users/groups to also be deleted), there # could still be assignments on this domain for users/groups in # other domains - so we should delete these here by making a call # to the backend to delete all assignments for this domain. # (see Bug #1277847) notifications.Audit.deleted(self._DOMAIN, domain_id, initiator) self.get_domain.invalidate(self, domain_id) self.get_domain_by_name.invalidate(self, domain['name']) # Invalidate user role assignments cache region, as it may be caching # role assignments where the target is the specified domain assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() def _delete_domain_contents(self, domain_id): """Delete the contents of a domain. Before we delete a domain, we need to remove all the entities that are owned by it, i.e. Projects. To do this we call the delete function for these entities, which are themselves responsible for deleting any credentials and role grants associated with them as well as revoking any relevant tokens. 
""" def _delete_projects(project, projects, examined): if project['id'] in examined: msg = _LE('Circular reference or a repeated entry found ' 'projects hierarchy - %(project_id)s.') LOG.error(msg, {'project_id': project['id']}) return examined.add(project['id']) children = [proj for proj in projects if proj.get('parent_id') == project['id']] for proj in children: _delete_projects(proj, projects, examined) try: self.delete_project(project['id'], initiator=None) except exception.ProjectNotFound: LOG.debug(('Project %(projectid)s not found when ' 'deleting domain contents for %(domainid)s, ' 'continuing with cleanup.'), {'projectid': project['id'], 'domainid': domain_id}) proj_refs = self.list_projects_in_domain(domain_id) # Deleting projects recursively roots = [x for x in proj_refs if x.get('parent_id') == domain_id] examined = set() for project in roots: _delete_projects(project, proj_refs, examined) @manager.response_truncated def list_projects(self, hints=None): return self.driver.list_projects(hints or driver_hints.Hints()) # NOTE(henry-nash): list_projects_in_domain is actually an internal method # and not exposed via the API. Therefore there is no need to support # driver hints for it. def list_projects_in_domain(self, domain_id): return self.driver.list_projects_in_domain(domain_id) def list_projects_acting_as_domain(self, hints=None): return self.driver.list_projects_acting_as_domain( hints or driver_hints.Hints()) @MEMOIZE def get_project(self, project_id): return self.driver.get_project(project_id) @MEMOIZE def get_project_by_name(self, project_name, domain_id): return self.driver.get_project_by_name(project_name, domain_id) def ensure_default_domain_exists(self): """Creates the default domain if it doesn't exist. This is only used for the v2 API and can go away when V2 does. 
""" try: default_domain_attrs = { 'name': 'Default', 'id': CONF.identity.default_domain_id, 'description': 'Domain created automatically to support V2.0 ' 'operations.', } self.create_domain(CONF.identity.default_domain_id, default_domain_attrs) LOG.warning(_LW( 'The default domain was created automatically to contain V2 ' 'resources. This is deprecated in the M release and will not ' 'be supported in the O release. Create the default domain ' 'manually or use the keystone-manage bootstrap command.')) except exception.Conflict: LOG.debug('The default domain already exists.') except Exception: LOG.error(_LE('Failed to create the default domain.')) raise # The ResourceDriverBase class is the set of driver methods from earlier # drivers that we still support, that have not been removed or modified. This # class is then used to created the augmented V8 and V9 version abstract driver # classes, without having to duplicate a lot of abstract method signatures. # If you remove a method from V9, then move the abstract methods from this Base # class to the V8 class. Do not modify any of the method signatures in the Base # class - changes should only be made in the V8 and subsequent classes. # Starting with V9, some drivers use a special value to represent a domain_id # of None. See comment in Project class of resource/backends/sql.py for more # details. NULL_DOMAIN_ID = '<>' @six.add_metaclass(abc.ABCMeta) class ResourceDriverBase(object): def _get_list_limit(self): return CONF.resource.list_limit or CONF.list_limit # project crud @abc.abstractmethod def list_projects(self, hints): """List projects in the system. :param hints: filter hints which the driver should implement if at all possible. :returns: a list of project_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_projects_from_ids(self, project_ids): """List projects for the provided list of ids. :param project_ids: list of ids :returns: a list of project_refs. 
This method is used internally by the assignment manager to bulk read a set of projects given their ids. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_project_ids_from_domain_ids(self, domain_ids): """List project ids for the provided list of domain ids. :param domain_ids: list of domain ids :returns: a list of project ids owned by the specified domain ids. This method is used internally by the assignment manager to bulk read a set of project ids given a list of domain ids. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_projects_in_domain(self, domain_id): """List projects in the domain. :param domain_id: the driver MUST only return projects within this domain. :returns: a list of project_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_project(self, project_id): """Get a project by ID. :returns: project_ref :raises keystone.exception.ProjectNotFound: if project_id does not exist """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_project(self, project_id, project): """Updates an existing project. :raises keystone.exception.ProjectNotFound: if project_id does not exist :raises keystone.exception.Conflict: if project name already exists """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_project(self, project_id): """Deletes an existing project. :raises keystone.exception.ProjectNotFound: if project_id does not exist """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_project_parents(self, project_id): """List all parents from a project by its ID. :param project_id: the driver will list the parents of this project. :returns: a list of project_refs or an empty list. 
:raises keystone.exception.ProjectNotFound: if project_id does not exist """ raise exception.NotImplemented() @abc.abstractmethod def list_projects_in_subtree(self, project_id): """List all projects in the subtree of a given project. :param project_id: the driver will get the subtree under this project. :returns: a list of project_refs or an empty list :raises keystone.exception.ProjectNotFound: if project_id does not exist """ raise exception.NotImplemented() @abc.abstractmethod def is_leaf_project(self, project_id): """Checks if a project is a leaf in the hierarchy. :param project_id: the driver will check if this project is a leaf in the hierarchy. :raises keystone.exception.ProjectNotFound: if project_id does not exist """ raise exception.NotImplemented() def _validate_default_domain(self, ref): """Validate that either the default domain or nothing is specified. Also removes the domain from the ref so that LDAP doesn't have to persist the attribute. """ ref = ref.copy() domain_id = ref.pop('domain_id', CONF.identity.default_domain_id) self._validate_default_domain_id(domain_id) return ref def _validate_default_domain_id(self, domain_id): """Validate that the domain ID belongs to the default domain.""" if domain_id != CONF.identity.default_domain_id: raise exception.DomainNotFound(domain_id=domain_id) class ResourceDriverV8(ResourceDriverBase): """Removed or redefined methods from V8. Move the abstract methods of any methods removed or modified in later versions of the driver from ResourceDriverBase to here. We maintain this so that legacy drivers, which will be a subclass of ResourceDriverV8, can still reference them. """ @abc.abstractmethod def create_project(self, tenant_id, tenant): """Creates a new project. :param tenant_id: This parameter can be ignored. 
:param dict tenant: The new project Project schema:: type: object properties: id: type: string name: type: string domain_id: type: string description: type: string enabled: type: boolean parent_id: type: string is_domain: type: boolean required: [id, name, domain_id] additionalProperties: true If project doesn't match the schema the behavior is undefined. The driver can impose requirements such as the maximum length of a field. If these requirements are not met the behavior is undefined. :raises keystone.exception.Conflict: if the project id already exists or the name already exists for the domain_id. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_project_by_name(self, tenant_name, domain_id): """Get a tenant by name. :returns: tenant_ref :raises keystone.exception.ProjectNotFound: if a project with the tenant_name does not exist within the domain """ raise exception.NotImplemented() # pragma: no cover # Domain management functions for backends that only allow a single # domain. Although we no longer use this, a custom legacy driver might # have made use of it, so keep it here in case. def _set_default_domain(self, ref): """If the domain ID has not been set, set it to the default.""" if isinstance(ref, dict): if 'domain_id' not in ref: ref = ref.copy() ref['domain_id'] = CONF.identity.default_domain_id return ref elif isinstance(ref, list): return [self._set_default_domain(x) for x in ref] else: raise ValueError(_('Expected dict or list: %s') % type(ref)) # domain crud @abc.abstractmethod def create_domain(self, domain_id, domain): """Creates a new domain. :raises keystone.exception.Conflict: if the domain_id or domain name already exists """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_domains(self, hints): """List domains in the system. :param hints: filter hints which the driver should implement if at all possible. :returns: a list of domain_refs or an empty list. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_domains_from_ids(self, domain_ids): """List domains for the provided list of ids. :param domain_ids: list of ids :returns: a list of domain_refs. This method is used internally by the assignment manager to bulk read a set of domains given their ids. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_domain(self, domain_id): """Get a domain by ID. :returns: domain_ref :raises keystone.exception.DomainNotFound: if domain_id does not exist """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_domain_by_name(self, domain_name): """Get a domain by name. :returns: domain_ref :raises keystone.exception.DomainNotFound: if domain_name does not exist """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_domain(self, domain_id, domain): """Updates an existing domain. :raises keystone.exception.DomainNotFound: if domain_id does not exist :raises keystone.exception.Conflict: if domain name already exists """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_domain(self, domain_id): """Deletes an existing domain. :raises keystone.exception.DomainNotFound: if domain_id does not exist """ raise exception.NotImplemented() # pragma: no cover class ResourceDriverV9(ResourceDriverBase): """New or redefined methods from V8. Add any new V9 abstract methods (or those with modified signatures) to this class. """ @abc.abstractmethod def create_project(self, project_id, project): """Creates a new project. :param project_id: This parameter can be ignored. 
:param dict project: The new project Project schema:: type: object properties: id: type: string name: type: string domain_id: type: [string, null] description: type: string enabled: type: boolean parent_id: type: string is_domain: type: boolean required: [id, name, domain_id] additionalProperties: true If the project doesn't match the schema the behavior is undefined. The driver can impose requirements such as the maximum length of a field. If these requirements are not met the behavior is undefined. :raises keystone.exception.Conflict: if the project id already exists or the name already exists for the domain_id. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_project_by_name(self, project_name, domain_id): """Get a project by name. :returns: project_ref :raises keystone.exception.ProjectNotFound: if a project with the project_name does not exist within the domain """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_projects_from_ids(self, project_ids): """Deletes a given list of projects. Deletes a list of projects. Ensures no project on the list exists after it is successfully called. If an empty list is provided, the it is silently ignored. In addition, if a project ID in the list of project_ids is not found in the backend, no exception is raised, but a message is logged. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_projects_acting_as_domain(self, hints): """List all projects acting as domains. :param hints: filter hints which the driver should implement if at all possible. :returns: a list of project_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover class V9ResourceWrapperForV8Driver(ResourceDriverV9): """Wrapper class to supported a V8 legacy driver. In order to support legacy drivers without having to make the manager code driver-version aware, we wrap legacy drivers so that they look like the latest version. 
For the various changes made in a new driver, here are the actions needed in this wrapper: Method removed from new driver - remove the call-through method from this class, since the manager will no longer be calling it. Method signature (or meaning) changed - wrap the old method in a new signature here, and munge the input and output parameters accordingly. New method added to new driver - add a method to implement the new functionality here if possible. If that is not possible, then return NotImplemented, since we do not guarantee to support new functionality with legacy drivers. This wrapper contains the following support for newer manager code: - The current manager code expects domains to be represented as projects acting as domains, something that may not be possible in a legacy driver. Hence the wrapper will map any calls for projects acting as a domain back onto the driver domain methods. The caveat for this, is that this assumes that there can not be a clash between a project_id and a domain_id, in which case it may not be able to locate the correct entry. """ @versionutils.deprecated( as_of=versionutils.deprecated.MITAKA, what='keystone.resource.ResourceDriverV8', in_favor_of='keystone.resource.ResourceDriverV9', remove_in=+2) def __init__(self, wrapped_driver): self.driver = wrapped_driver def _get_domain_from_project(self, project_ref): """Creates a domain ref from a project ref. Based on the provided project ref (or partial ref), creates a domain ref, so that the result can be passed to the driver domain methods. 
""" domain_ref = project_ref.copy() for k in ['parent_id', 'domain_id', 'is_domain']: domain_ref.pop(k, None) return domain_ref def get_project_by_name(self, project_name, domain_id): if domain_id is None: try: domain_ref = self.driver.get_domain_by_name(project_name) return _get_project_from_domain(domain_ref) except exception.DomainNotFound: raise exception.ProjectNotFound(project_id=project_name) else: return self.driver.get_project_by_name(project_name, domain_id) def create_project(self, project_id, project): if project['is_domain']: new_domain = self._get_domain_from_project(project) domain_ref = self.driver.create_domain(project_id, new_domain) return _get_project_from_domain(domain_ref) else: return self.driver.create_project(project_id, project) def list_projects(self, hints): """List projects and/or domains. We use the hints filter to determine whether we are listing projects, domains or both. If the filter includes domain_id==None, then we should only list domains (convert to a project acting as a domain) since regular projcets always have a non-None value for domain_id. Likewise, if the filter includes domain_id==, then we should only list projects. If there is no domain_id filter, then we need to do a combained listing of domains and projects, converting domains to projects acting as a domain. """ domain_listing_filter = None for f in hints.filters: if (f['name'] == 'domain_id'): domain_listing_filter = f if domain_listing_filter is not None: if domain_listing_filter['value'] is not None: proj_list = self.driver.list_projects(hints) else: domains = self.driver.list_domains(hints) proj_list = [_get_project_from_domain(p) for p in domains] hints.filters.remove(domain_listing_filter) return proj_list else: # No domain_id filter, so combine domains and projects. 
Although # we hand any remaining filters into each driver, since each filter # might need to be carried out more than once, we use copies of the # filters, allowing the original filters to be passed back up to # controller level where a final filter will occur. local_hints = copy.deepcopy(hints) proj_list = self.driver.list_projects(local_hints) local_hints = copy.deepcopy(hints) domains = self.driver.list_domains(local_hints) for domain in domains: proj_list.append(_get_project_from_domain(domain)) return proj_list def list_projects_from_ids(self, project_ids): return [self.get_project(id) for id in project_ids] def list_project_ids_from_domain_ids(self, domain_ids): return self.driver.list_project_ids_from_domain_ids(domain_ids) def list_projects_in_domain(self, domain_id): return self.driver.list_projects_in_domain(domain_id) def get_project(self, project_id): try: domain_ref = self.driver.get_domain(project_id) return _get_project_from_domain(domain_ref) except exception.DomainNotFound: return self.driver.get_project(project_id) def _is_domain(self, project_id): ref = self.get_project(project_id) return ref.get('is_domain', False) def update_project(self, project_id, project): if self._is_domain(project_id): update_domain = self._get_domain_from_project(project) domain_ref = self.driver.update_domain(project_id, update_domain) return _get_project_from_domain(domain_ref) else: return self.driver.update_project(project_id, project) def delete_project(self, project_id): if self._is_domain(project_id): try: self.driver.delete_domain(project_id) except exception.DomainNotFound: raise exception.ProjectNotFound(project_id=project_id) else: self.driver.delete_project(project_id) def delete_projects_from_ids(self, project_ids): raise exception.NotImplemented() # pragma: no cover def list_project_parents(self, project_id): """List a project's ancestors. 
The current manager expects the ancestor tree to end with the project acting as the domain (since that's now the top of the tree), but a legacy driver will not have that top project in their projects table, since it's still in the domain table. Hence we lift the algorithm for traversing up the tree from the driver to here, so that our version of get_project() is called, which will fetch the "project" from the right table. """ project = self.get_project(project_id) parents = [] examined = set() while project.get('parent_id') is not None: if project['id'] in examined: msg = _LE('Circular reference or a repeated ' 'entry found in projects hierarchy - ' '%(project_id)s.') LOG.error(msg, {'project_id': project['id']}) return examined.add(project['id']) parent_project = self.get_project(project['parent_id']) parents.append(parent_project) project = parent_project return parents def list_projects_in_subtree(self, project_id): return self.driver.list_projects_in_subtree(project_id) def is_leaf_project(self, project_id): return self.driver.is_leaf_project(project_id) def list_projects_acting_as_domain(self, hints): refs = self.driver.list_domains(hints) return [_get_project_from_domain(p) for p in refs] Driver = manager.create_legacy_driver(ResourceDriverV8) MEMOIZE_CONFIG = cache.get_memoization_decorator(group='domain_config') @dependency.provider('domain_config_api') class DomainConfigManager(manager.Manager): """Default pivot point for the Domain Config backend.""" # NOTE(henry-nash): In order for a config option to be stored in the # standard table, it must be explicitly whitelisted. Options marked as # sensitive are stored in a separate table. Attempting to store options # that are not listed as either whitelisted or sensitive will raise an # exception. # # Only those options that affect the domain-specific driver support in # the identity manager are supported. 
driver_namespace = 'keystone.resource.domain_config' whitelisted_options = { 'identity': ['driver', 'list_limit'], 'ldap': [ 'url', 'user', 'suffix', 'use_dumb_member', 'dumb_member', 'allow_subtree_delete', 'query_scope', 'page_size', 'alias_dereferencing', 'debug_level', 'chase_referrals', 'user_tree_dn', 'user_filter', 'user_objectclass', 'user_id_attribute', 'user_name_attribute', 'user_mail_attribute', 'user_description_attribute', 'user_pass_attribute', 'user_enabled_attribute', 'user_enabled_invert', 'user_enabled_mask', 'user_enabled_default', 'user_attribute_ignore', 'user_default_project_id_attribute', 'user_allow_create', 'user_allow_update', 'user_allow_delete', 'user_enabled_emulation', 'user_enabled_emulation_dn', 'user_enabled_emulation_use_group_config', 'user_additional_attribute_mapping', 'group_tree_dn', 'group_filter', 'group_objectclass', 'group_id_attribute', 'group_name_attribute', 'group_member_attribute', 'group_desc_attribute', 'group_attribute_ignore', 'group_allow_create', 'group_allow_update', 'group_allow_delete', 'group_additional_attribute_mapping', 'tls_cacertfile', 'tls_cacertdir', 'use_tls', 'tls_req_cert', 'use_pool', 'pool_size', 'pool_retry_max', 'pool_retry_delay', 'pool_connection_timeout', 'pool_connection_lifetime', 'use_auth_pool', 'auth_pool_size', 'auth_pool_connection_lifetime' ] } sensitive_options = { 'identity': [], 'ldap': ['password'] } def __init__(self): super(DomainConfigManager, self).__init__(CONF.domain_config.driver) def _assert_valid_config(self, config): """Ensure the options in the config are valid. This method is called to validate the request config in create and update manager calls. 
:param config: config structure being created or updated """ # Something must be defined in the request if not config: raise exception.InvalidDomainConfig( reason=_('No options specified')) # Make sure the groups/options defined in config itself are valid for group in config: if (not config[group] or not isinstance(config[group], dict)): msg = _('The value of group %(group)s specified in the ' 'config should be a dictionary of options') % { 'group': group} raise exception.InvalidDomainConfig(reason=msg) for option in config[group]: self._assert_valid_group_and_option(group, option) def _assert_valid_group_and_option(self, group, option): """Ensure the combination of group and option is valid. :param group: optional group name, if specified it must be one we support :param option: optional option name, if specified it must be one we support and a group must also be specified """ if not group and not option: # For all calls, it's OK for neither to be defined, it means you # are operating on all config options for that domain. return if not group and option: # Our API structure should prevent this from ever happening, so if # it does, then this is coding error. 
msg = _('Option %(option)s found with no group specified while ' 'checking domain configuration request') % { 'option': option} raise exception.UnexpectedError(exception=msg) if (group and group not in self.whitelisted_options and group not in self.sensitive_options): msg = _('Group %(group)s is not supported ' 'for domain specific configurations') % {'group': group} raise exception.InvalidDomainConfig(reason=msg) if option: if (option not in self.whitelisted_options[group] and option not in self.sensitive_options[group]): msg = _('Option %(option)s in group %(group)s is not ' 'supported for domain specific configurations') % { 'group': group, 'option': option} raise exception.InvalidDomainConfig(reason=msg) def _is_sensitive(self, group, option): return option in self.sensitive_options[group] def _config_to_list(self, config): """Build whitelisted and sensitive lists for use by backend drivers.""" whitelisted = [] sensitive = [] for group in config: for option in config[group]: the_list = (sensitive if self._is_sensitive(group, option) else whitelisted) the_list.append({ 'group': group, 'option': option, 'value': config[group][option]}) return whitelisted, sensitive def _list_to_config(self, whitelisted, sensitive=None, req_option=None): """Build config dict from a list of option dicts. :param whitelisted: list of dicts containing options and their groups, this has already been filtered to only contain those options to include in the output. :param sensitive: list of dicts containing sensitive options and their groups, this has already been filtered to only contain those options to include in the output. :param req_option: the individual option requested :returns: a config dict, including sensitive if specified """ the_list = whitelisted + (sensitive or []) if not the_list: return {} if req_option: # The request was specific to an individual option, so # no need to include the group in the output. 
We first check that # there is only one option in the answer (and that it's the right # one) - if not, something has gone wrong and we raise an error if len(the_list) > 1 or the_list[0]['option'] != req_option: LOG.error(_LE('Unexpected results in response for domain ' 'config - %(count)s responses, first option is ' '%(option)s, expected option %(expected)s'), {'count': len(the_list), 'option': list[0]['option'], 'expected': req_option}) raise exception.UnexpectedError( _('An unexpected error occurred when retrieving domain ' 'configs')) return {the_list[0]['option']: the_list[0]['value']} config = {} for option in the_list: config.setdefault(option['group'], {}) config[option['group']][option['option']] = option['value'] return config def create_config(self, domain_id, config): """Create config for a domain :param domain_id: the domain in question :param config: the dict of config groups/options to assign to the domain Creates a new config, overwriting any previous config (no Conflict error will be generated). :returns: a dict of group dicts containing the options, with any that are sensitive removed :raises keystone.exception.InvalidDomainConfig: when the config contains options we do not support """ self._assert_valid_config(config) whitelisted, sensitive = self._config_to_list(config) # Delete any existing config self.delete_config_options(domain_id) self.delete_config_options(domain_id, sensitive=True) # ...and create the new one for option in whitelisted: self.create_config_option( domain_id, option['group'], option['option'], option['value']) for option in sensitive: self.create_config_option( domain_id, option['group'], option['option'], option['value'], sensitive=True) # Since we are caching on the full substituted config, we just # invalidate here, rather than try and create the right result to # cache. 
self.get_config_with_sensitive_info.invalidate(self, domain_id) return self._list_to_config(whitelisted) def get_config(self, domain_id, group=None, option=None): """Get config, or partial config, for a domain :param domain_id: the domain in question :param group: an optional specific group of options :param option: an optional specific option within the group :returns: a dict of group dicts containing the whitelisted options, filtered by group and option specified :raises keystone.exception.DomainConfigNotFound: when no config found that matches domain_id, group and option specified :raises keystone.exception.InvalidDomainConfig: when the config and group/option parameters specify an option we do not support An example response:: { 'ldap': { 'url': 'myurl' 'user_tree_dn': 'OU=myou'}, 'identity': { 'driver': 'ldap'} } """ self._assert_valid_group_and_option(group, option) whitelisted = self.list_config_options(domain_id, group, option) if whitelisted: return self._list_to_config(whitelisted, req_option=option) if option: msg = _('option %(option)s in group %(group)s') % { 'group': group, 'option': option} elif group: msg = _('group %(group)s') % {'group': group} else: msg = _('any options') raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg) def update_config(self, domain_id, config, group=None, option=None): """Update config, or partial config, for a domain :param domain_id: the domain in question :param config: the config dict containing and groups/options being updated :param group: an optional specific group of options, which if specified must appear in config, with no other groups :param option: an optional specific option within the group, which if specified must appear in config, with no other options The contents of the supplied config will be merged with the existing config for this domain, updating or creating new options if these did not previously exist. 
If group or option is specified, then the update will be limited to those specified items and the inclusion of other options in the supplied config will raise an exception, as will the situation when those options do not already exist in the current config. :returns: a dict of groups containing all whitelisted options :raises keystone.exception.InvalidDomainConfig: when the config and group/option parameters specify an option we do not support or one that does not exist in the original config """ def _assert_valid_update(domain_id, config, group=None, option=None): """Ensure the combination of config, group and option is valid.""" self._assert_valid_config(config) self._assert_valid_group_and_option(group, option) # If a group has been specified, then the request is to # explicitly only update the options in that group - so the config # must not contain anything else. Further, that group must exist in # the original config. Likewise, if an option has been specified, # then the group in the config must only contain that option and it # also must exist in the original config. if group: if len(config) != 1 or (option and len(config[group]) != 1): if option: msg = _('Trying to update option %(option)s in group ' '%(group)s, so that, and only that, option ' 'must be specified in the config') % { 'group': group, 'option': option} else: msg = _('Trying to update group %(group)s, so that, ' 'and only that, group must be specified in ' 'the config') % {'group': group} raise exception.InvalidDomainConfig(reason=msg) # So we now know we have the right number of entries in the # config that align with a group/option being specified, but we # must also make sure they match. 
if group not in config: msg = _('request to update group %(group)s, but config ' 'provided contains group %(group_other)s ' 'instead') % { 'group': group, 'group_other': list(config.keys())[0]} raise exception.InvalidDomainConfig(reason=msg) if option and option not in config[group]: msg = _('Trying to update option %(option)s in group ' '%(group)s, but config provided contains option ' '%(option_other)s instead') % { 'group': group, 'option': option, 'option_other': list(config[group].keys())[0]} raise exception.InvalidDomainConfig(reason=msg) # Finally, we need to check if the group/option specified # already exists in the original config - since if not, to keep # with the semantics of an update, we need to fail with # a DomainConfigNotFound if not self._get_config_with_sensitive_info(domain_id, group, option): if option: msg = _('option %(option)s in group %(group)s') % { 'group': group, 'option': option} raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg) else: msg = _('group %(group)s') % {'group': group} raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg) def _update_or_create(domain_id, option, sensitive): """Update the option, if it doesn't exist then create it.""" try: self.create_config_option( domain_id, option['group'], option['option'], option['value'], sensitive=sensitive) except exception.Conflict: self.update_config_option( domain_id, option['group'], option['option'], option['value'], sensitive=sensitive) update_config = config if group and option: # The config will just be a dict containing the option and # its value, so make it look like a single option under the # group in question update_config = {group: config} _assert_valid_update(domain_id, update_config, group, option) whitelisted, sensitive = self._config_to_list(update_config) for new_option in whitelisted: _update_or_create(domain_id, new_option, sensitive=False) for new_option in sensitive: _update_or_create(domain_id, new_option, 
sensitive=True) self.get_config_with_sensitive_info.invalidate(self, domain_id) return self.get_config(domain_id) def delete_config(self, domain_id, group=None, option=None): """Delete config, or partial config, for the domain. :param domain_id: the domain in question :param group: an optional specific group of options :param option: an optional specific option within the group If group and option are None, then the entire config for the domain is deleted. If group is not None, then just that group of options will be deleted. If group and option are both specified, then just that option is deleted. :raises keystone.exception.InvalidDomainConfig: when group/option parameters specify an option we do not support or one that does not exist in the original config. """ self._assert_valid_group_and_option(group, option) if group: # As this is a partial delete, then make sure the items requested # are valid and exist in the current config current_config = self._get_config_with_sensitive_info(domain_id) # Raise an exception if the group/options specified don't exist in # the current config so that the delete method provides the # correct error semantics. current_group = current_config.get(group) if not current_group: msg = _('group %(group)s') % {'group': group} raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg) if option and not current_group.get(option): msg = _('option %(option)s in group %(group)s') % { 'group': group, 'option': option} raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg) self.delete_config_options(domain_id, group, option) self.delete_config_options(domain_id, group, option, sensitive=True) self.get_config_with_sensitive_info.invalidate(self, domain_id) def _get_config_with_sensitive_info(self, domain_id, group=None, option=None): """Get config for a domain/group/option with sensitive info included. 
This is only used by the methods within this class, which may need to check individual groups or options. """ whitelisted = self.list_config_options(domain_id, group, option) sensitive = self.list_config_options(domain_id, group, option, sensitive=True) # Check if there are any sensitive substitutions needed. We first try # and simply ensure any sensitive options that have valid substitution # references in the whitelisted options are substituted. We then check # the resulting whitelisted option and raise a warning if there # appears to be an unmatched or incorrectly constructed substitution # reference. To avoid the risk of logging any sensitive options that # have already been substituted, we first take a copy of the # whitelisted option. # Build a dict of the sensitive options ready to try substitution sensitive_dict = {s['option']: s['value'] for s in sensitive} for each_whitelisted in whitelisted: if not isinstance(each_whitelisted['value'], six.string_types): # We only support substitutions into string types, if its an # integer, list etc. then just continue onto the next one continue # Store away the original value in case we need to raise a warning # after substitution. original_value = each_whitelisted['value'] warning_msg = '' try: each_whitelisted['value'] = ( each_whitelisted['value'] % sensitive_dict) except KeyError: warning_msg = _LW( 'Found what looks like an unmatched config option ' 'substitution reference - domain: %(domain)s, group: ' '%(group)s, option: %(option)s, value: %(value)s. 
Perhaps ' 'the config option to which it refers has yet to be ' 'added?') except (ValueError, TypeError): warning_msg = _LW( 'Found what looks like an incorrectly constructed ' 'config option substitution reference - domain: ' '%(domain)s, group: %(group)s, option: %(option)s, ' 'value: %(value)s.') if warning_msg: LOG.warning(warning_msg % { 'domain': domain_id, 'group': each_whitelisted['group'], 'option': each_whitelisted['option'], 'value': original_value}) return self._list_to_config(whitelisted, sensitive) @MEMOIZE_CONFIG def get_config_with_sensitive_info(self, domain_id): """Get config for a domain with sensitive info included. This method is not exposed via the public API, but is used by the identity manager to initialize a domain with the fully formed config options. """ return self._get_config_with_sensitive_info(domain_id) def get_config_default(self, group=None, option=None): """Get default config, or partial default config :param group: an optional specific group of options :param option: an optional specific option within the group :returns: a dict of group dicts containing the default options, filtered by group and option if specified :raises keystone.exception.InvalidDomainConfig: when the config and group/option parameters specify an option we do not support (or one that is not whitelisted). 
An example response:: { 'ldap': { 'url': 'myurl', 'user_tree_dn': 'OU=myou', ....}, 'identity': { 'driver': 'ldap'} } """ def _option_dict(group, option): group_attr = getattr(CONF, group) if group_attr is None: msg = _('Group %s not found in config') % group raise exception.UnexpectedError(msg) return {'group': group, 'option': option, 'value': getattr(group_attr, option)} self._assert_valid_group_and_option(group, option) config_list = [] if group: if option: if option not in self.whitelisted_options[group]: msg = _('Reading the default for option %(option)s in ' 'group %(group)s is not supported') % { 'option': option, 'group': group} raise exception.InvalidDomainConfig(reason=msg) config_list.append(_option_dict(group, option)) else: for each_option in self.whitelisted_options[group]: config_list.append(_option_dict(group, each_option)) else: for each_group in self.whitelisted_options: for each_option in self.whitelisted_options[each_group]: config_list.append(_option_dict(each_group, each_option)) return self._list_to_config(config_list, req_option=option) @six.add_metaclass(abc.ABCMeta) class DomainConfigDriverV8(object): """Interface description for a Domain Config driver.""" @abc.abstractmethod def create_config_option(self, domain_id, group, option, value, sensitive=False): """Creates a config option for a domain. :param domain_id: the domain for this option :param group: the group name :param option: the option name :param value: the value to assign to this option :param sensitive: whether the option is sensitive :returns: dict containing group, option and value :raises keystone.exception.Conflict: when the option already exists """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_config_option(self, domain_id, group, option, sensitive=False): """Gets the config option for a domain. 
:param domain_id: the domain for this option :param group: the group name :param option: the option name :param sensitive: whether the option is sensitive :returns: dict containing group, option and value :raises keystone.exception.DomainConfigNotFound: the option doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_config_options(self, domain_id, group=None, option=False, sensitive=False): """Gets a config options for a domain. :param domain_id: the domain for this option :param group: optional group option name :param option: optional option name. If group is None, then this parameter is ignored :param sensitive: whether the option is sensitive :returns: list of dicts containing group, option and value """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_config_option(self, domain_id, group, option, value, sensitive=False): """Updates a config option for a domain. :param domain_id: the domain for this option :param group: the group option name :param option: the option name :param value: the value to assign to this option :param sensitive: whether the option is sensitive :returns: dict containing updated group, option and value :raises keystone.exception.DomainConfigNotFound: the option doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_config_options(self, domain_id, group=None, option=None, sensitive=False): """Deletes config options for a domain. Allows deletion of all options for a domain, all options in a group or a specific option. The driver is silent if there are no options to delete. :param domain_id: the domain for this option :param group: optional group option name :param option: optional option name. 
If group is None, then this parameter is ignored :param sensitive: whether the option is sensitive """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def obtain_registration(self, domain_id, type): """Try and register this domain to use the type specified. :param domain_id: the domain required :param type: type of registration :returns: True if the domain was registered, False otherwise. Failing to register means that someone already has it (which could even be the domain being requested). """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def read_registration(self, type): """Get the domain ID of who is registered to use this type. :param type: type of registration :returns: domain_id of who is registered. :raises keystone.exception.ConfigRegistrationNotFound: If nobody is registered. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def release_registration(self, domain_id, type=None): """Release registration if it is held by the domain specified. If the specified domain is registered for this domain then free it, if it is not then do nothing - no exception is raised. :param domain_id: the domain in question :param type: type of registration, if None then all registrations for this domain will be freed """ raise exception.NotImplemented() # pragma: no cover DomainConfigDriver = manager.create_legacy_driver(DomainConfigDriverV8) keystone-9.0.0/keystone/resource/config_backends/0000775000567000056710000000000012701407246023314 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/resource/config_backends/__init__.py0000664000567000056710000000000012701407102025402 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/resource/config_backends/sql.py0000664000567000056710000001367612701407105024474 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import sql from keystone import exception from keystone.i18n import _ from keystone import resource class WhiteListedConfig(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'whitelisted_config' domain_id = sql.Column(sql.String(64), primary_key=True) group = sql.Column(sql.String(255), primary_key=True) option = sql.Column(sql.String(255), primary_key=True) value = sql.Column(sql.JsonBlob(), nullable=False) def to_dict(self): d = super(WhiteListedConfig, self).to_dict() d.pop('domain_id') return d class SensitiveConfig(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'sensitive_config' domain_id = sql.Column(sql.String(64), primary_key=True) group = sql.Column(sql.String(255), primary_key=True) option = sql.Column(sql.String(255), primary_key=True) value = sql.Column(sql.JsonBlob(), nullable=False) def to_dict(self): d = super(SensitiveConfig, self).to_dict() d.pop('domain_id') return d class ConfigRegister(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'config_register' type = sql.Column(sql.String(64), primary_key=True) domain_id = sql.Column(sql.String(64), nullable=False) class DomainConfig(resource.DomainConfigDriverV8): def choose_table(self, sensitive): if sensitive: return SensitiveConfig else: return WhiteListedConfig @sql.handle_conflicts(conflict_type='domain_config') def create_config_option(self, domain_id, group, option, value, sensitive=False): with sql.session_for_write() as session: config_table = self.choose_table(sensitive) ref = config_table(domain_id=domain_id, group=group, option=option, value=value) 
session.add(ref) return ref.to_dict() def _get_config_option(self, session, domain_id, group, option, sensitive): try: config_table = self.choose_table(sensitive) ref = (session.query(config_table). filter_by(domain_id=domain_id, group=group, option=option).one()) except sql.NotFound: msg = _('option %(option)s in group %(group)s') % { 'group': group, 'option': option} raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg) return ref def get_config_option(self, domain_id, group, option, sensitive=False): with sql.session_for_read() as session: ref = self._get_config_option(session, domain_id, group, option, sensitive) return ref.to_dict() def list_config_options(self, domain_id, group=None, option=None, sensitive=False): with sql.session_for_read() as session: config_table = self.choose_table(sensitive) query = session.query(config_table) query = query.filter_by(domain_id=domain_id) if group: query = query.filter_by(group=group) if option: query = query.filter_by(option=option) return [ref.to_dict() for ref in query.all()] def update_config_option(self, domain_id, group, option, value, sensitive=False): with sql.session_for_write() as session: ref = self._get_config_option(session, domain_id, group, option, sensitive) ref.value = value return ref.to_dict() def delete_config_options(self, domain_id, group=None, option=None, sensitive=False): """Deletes config options that match the filter parameters. Since the public API is broken down into calls for delete in both the whitelisted and sensitive methods, we are silent at the driver level if there was nothing to delete. 
""" with sql.session_for_write() as session: config_table = self.choose_table(sensitive) query = session.query(config_table) query = query.filter_by(domain_id=domain_id) if group: query = query.filter_by(group=group) if option: query = query.filter_by(option=option) query.delete(False) def obtain_registration(self, domain_id, type): try: with sql.session_for_write() as session: ref = ConfigRegister(type=type, domain_id=domain_id) session.add(ref) return True except sql.DBDuplicateEntry: # nosec # Continue on and return False to indicate failure. pass return False def read_registration(self, type): with sql.session_for_read() as session: ref = session.query(ConfigRegister).get(type) if not ref: raise exception.ConfigRegistrationNotFound() return ref.domain_id def release_registration(self, domain_id, type=None): """Silently delete anything registered for the domain specified.""" with sql.session_for_write() as session: query = session.query(ConfigRegister) if type: query = query.filter_by(type=type) query = query.filter_by(domain_id=domain_id) query.delete(False) keystone-9.0.0/keystone/resource/controllers.py0000664000567000056710000003315712701407102023135 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud, Inc. # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Workflow Logic the Resource service.""" import uuid from oslo_config import cfg from keystone.common import controller from keystone.common import dependency from keystone.common import validation from keystone.common import wsgi from keystone import exception from keystone.i18n import _ from keystone import notifications from keystone.resource import schema CONF = cfg.CONF @dependency.requires('resource_api') class Tenant(controller.V2Controller): @controller.v2_deprecated def get_all_projects(self, context, **kw): """Gets a list of all tenants for an admin user.""" self.assert_admin(context) if 'name' in context['query_string']: return self._get_project_by_name(context['query_string']['name']) try: tenant_refs = self.resource_api.list_projects_in_domain( CONF.identity.default_domain_id) except exception.DomainNotFound: # If the default domain doesn't exist then there are no V2 # projects. tenant_refs = [] tenant_refs = [self.v3_to_v2_project(tenant_ref) for tenant_ref in tenant_refs if not tenant_ref.get('is_domain')] params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self.format_project_list(tenant_refs, **params) def _assert_not_is_domain_project(self, project_id, project_ref=None): # Projects acting as a domain should not be visible via v2 if not project_ref: project_ref = self.resource_api.get_project(project_id) if project_ref.get('is_domain'): raise exception.ProjectNotFound(project_id) @controller.v2_deprecated def get_project(self, context, tenant_id): # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) ref = self.resource_api.get_project(tenant_id) self._assert_not_is_domain_project(tenant_id, ref) return {'tenant': self.v3_to_v2_project(ref)} def _get_project_by_name(self, tenant_name): # Projects acting as a domain should not be visible via v2 ref = self.resource_api.get_project_by_name( tenant_name, CONF.identity.default_domain_id) 
self._assert_not_is_domain_project(ref['id'], ref) return {'tenant': self.v3_to_v2_project(ref)} # CRUD Extension @controller.v2_deprecated def create_project(self, context, tenant): tenant_ref = self._normalize_dict(tenant) if 'name' not in tenant_ref or not tenant_ref['name']: msg = _('Name field is required and cannot be empty') raise exception.ValidationError(message=msg) if 'is_domain' in tenant_ref: msg = _('The creation of projects acting as domains is not ' 'allowed in v2.') raise exception.ValidationError(message=msg) self.assert_admin(context) self.resource_api.ensure_default_domain_exists() tenant_ref['id'] = tenant_ref.get('id', uuid.uuid4().hex) initiator = notifications._get_request_audit_info(context) tenant = self.resource_api.create_project( tenant_ref['id'], self._normalize_domain_id(context, tenant_ref), initiator) return {'tenant': self.v3_to_v2_project(tenant)} @controller.v2_deprecated def update_project(self, context, tenant_id, tenant): self.assert_admin(context) self._assert_not_is_domain_project(tenant_id) # Remove domain_id and is_domain if specified - a v2 api caller # should not be specifying that clean_tenant = tenant.copy() clean_tenant.pop('domain_id', None) clean_tenant.pop('is_domain', None) initiator = notifications._get_request_audit_info(context) tenant_ref = self.resource_api.update_project( tenant_id, clean_tenant, initiator) return {'tenant': self.v3_to_v2_project(tenant_ref)} @controller.v2_deprecated def delete_project(self, context, tenant_id): self.assert_admin(context) self._assert_not_is_domain_project(tenant_id) initiator = notifications._get_request_audit_info(context) self.resource_api.delete_project(tenant_id, initiator) @dependency.requires('resource_api') class DomainV3(controller.V3Controller): collection_name = 'domains' member_name = 'domain' def __init__(self): super(DomainV3, self).__init__() self.get_member_from_driver = self.resource_api.get_domain @controller.protected() 
@validation.validated(schema.domain_create, 'domain') def create_domain(self, context, domain): ref = self._assign_unique_id(self._normalize_dict(domain)) initiator = notifications._get_request_audit_info(context) ref = self.resource_api.create_domain(ref['id'], ref, initiator) return DomainV3.wrap_member(context, ref) @controller.filterprotected('enabled', 'name') def list_domains(self, context, filters): hints = DomainV3.build_driver_hints(context, filters) refs = self.resource_api.list_domains(hints=hints) return DomainV3.wrap_collection(context, refs, hints=hints) @controller.protected() def get_domain(self, context, domain_id): ref = self.resource_api.get_domain(domain_id) return DomainV3.wrap_member(context, ref) @controller.protected() @validation.validated(schema.domain_update, 'domain') def update_domain(self, context, domain_id, domain): self._require_matching_id(domain_id, domain) initiator = notifications._get_request_audit_info(context) ref = self.resource_api.update_domain(domain_id, domain, initiator) return DomainV3.wrap_member(context, ref) @controller.protected() def delete_domain(self, context, domain_id): initiator = notifications._get_request_audit_info(context) return self.resource_api.delete_domain(domain_id, initiator) @dependency.requires('domain_config_api') @dependency.requires('resource_api') class DomainConfigV3(controller.V3Controller): member_name = 'config' @controller.protected() def create_domain_config(self, context, domain_id, config): self.resource_api.get_domain(domain_id) original_config = ( self.domain_config_api.get_config_with_sensitive_info(domain_id)) ref = self.domain_config_api.create_config(domain_id, config) if original_config: # Return status code 200, since config already existed return wsgi.render_response(body={self.member_name: ref}) else: return wsgi.render_response(body={self.member_name: ref}, status=('201', 'Created')) @controller.protected() def get_domain_config(self, context, domain_id, group=None, 
option=None): self.resource_api.get_domain(domain_id) ref = self.domain_config_api.get_config(domain_id, group, option) return {self.member_name: ref} @controller.protected() def update_domain_config( self, context, domain_id, config, group, option): self.resource_api.get_domain(domain_id) ref = self.domain_config_api.update_config( domain_id, config, group, option) return wsgi.render_response(body={self.member_name: ref}) def update_domain_config_group(self, context, domain_id, group, config): self.resource_api.get_domain(domain_id) return self.update_domain_config( context, domain_id, config, group, option=None) def update_domain_config_only(self, context, domain_id, config): self.resource_api.get_domain(domain_id) return self.update_domain_config( context, domain_id, config, group=None, option=None) @controller.protected() def delete_domain_config( self, context, domain_id, group=None, option=None): self.resource_api.get_domain(domain_id) self.domain_config_api.delete_config(domain_id, group, option) @controller.protected() def get_domain_config_default(self, context, group=None, option=None): ref = self.domain_config_api.get_config_default(group, option) return {self.member_name: ref} @dependency.requires('resource_api') class ProjectV3(controller.V3Controller): collection_name = 'projects' member_name = 'project' def __init__(self): super(ProjectV3, self).__init__() self.get_member_from_driver = self.resource_api.get_project @controller.protected() @validation.validated(schema.project_create, 'project') def create_project(self, context, project): ref = self._assign_unique_id(self._normalize_dict(project)) if not ref.get('is_domain'): ref = self._normalize_domain_id(context, ref) # Our API requires that you specify the location in the hierarchy # unambiguously. This could be by parent_id or, if it is a top level # project, just by providing a domain_id. 
if not ref.get('parent_id'): ref['parent_id'] = ref.get('domain_id') initiator = notifications._get_request_audit_info(context) try: ref = self.resource_api.create_project(ref['id'], ref, initiator=initiator) except (exception.DomainNotFound, exception.ProjectNotFound) as e: raise exception.ValidationError(e) return ProjectV3.wrap_member(context, ref) @controller.filterprotected('domain_id', 'enabled', 'name', 'parent_id', 'is_domain') def list_projects(self, context, filters): hints = ProjectV3.build_driver_hints(context, filters) # If 'is_domain' has not been included as a query, we default it to # False (which in query terms means '0' if 'is_domain' not in context['query_string']: hints.add_filter('is_domain', '0') refs = self.resource_api.list_projects(hints=hints) return ProjectV3.wrap_collection(context, refs, hints=hints) def _expand_project_ref(self, context, ref): params = context['query_string'] parents_as_list = 'parents_as_list' in params and ( self.query_filter_is_true(params['parents_as_list'])) parents_as_ids = 'parents_as_ids' in params and ( self.query_filter_is_true(params['parents_as_ids'])) subtree_as_list = 'subtree_as_list' in params and ( self.query_filter_is_true(params['subtree_as_list'])) subtree_as_ids = 'subtree_as_ids' in params and ( self.query_filter_is_true(params['subtree_as_ids'])) # parents_as_list and parents_as_ids are mutually exclusive if parents_as_list and parents_as_ids: msg = _('Cannot use parents_as_list and parents_as_ids query ' 'params at the same time.') raise exception.ValidationError(msg) # subtree_as_list and subtree_as_ids are mutually exclusive if subtree_as_list and subtree_as_ids: msg = _('Cannot use subtree_as_list and subtree_as_ids query ' 'params at the same time.') raise exception.ValidationError(msg) user_id = self.get_auth_context(context).get('user_id') if parents_as_list: parents = self.resource_api.list_project_parents( ref['id'], user_id) ref['parents'] = [ProjectV3.wrap_member(context, p) for p in 
parents] elif parents_as_ids: ref['parents'] = self.resource_api.get_project_parents_as_ids(ref) if subtree_as_list: subtree = self.resource_api.list_projects_in_subtree( ref['id'], user_id) ref['subtree'] = [ProjectV3.wrap_member(context, p) for p in subtree] elif subtree_as_ids: ref['subtree'] = self.resource_api.get_projects_in_subtree_as_ids( ref['id']) @controller.protected() def get_project(self, context, project_id): ref = self.resource_api.get_project(project_id) self._expand_project_ref(context, ref) return ProjectV3.wrap_member(context, ref) @controller.protected() @validation.validated(schema.project_update, 'project') def update_project(self, context, project_id, project): self._require_matching_id(project_id, project) self._require_matching_domain_id( project_id, project, self.resource_api.get_project) initiator = notifications._get_request_audit_info(context) ref = self.resource_api.update_project(project_id, project, initiator=initiator) return ProjectV3.wrap_member(context, ref) @controller.protected() def delete_project(self, context, project_id): initiator = notifications._get_request_audit_info(context) return self.resource_api.delete_project(project_id, initiator=initiator) keystone-9.0.0/keystone/resource/routers.py0000664000567000056710000001151612701407102022265 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud, Inc. # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""WSGI Routers for the Resource service.""" from keystone.common import json_home from keystone.common import router from keystone.common import wsgi from keystone.resource import controllers class Admin(wsgi.ComposableRouter): def add_routes(self, mapper): # Tenant Operations tenant_controller = controllers.Tenant() mapper.connect('/tenants', controller=tenant_controller, action='get_all_projects', conditions=dict(method=['GET'])) mapper.connect('/tenants/{tenant_id}', controller=tenant_controller, action='get_project', conditions=dict(method=['GET'])) class Routers(wsgi.RoutersBase): def append_v3_routers(self, mapper, routers): routers.append( router.Router(controllers.DomainV3(), 'domains', 'domain', resource_descriptions=self.v3_resources)) config_controller = controllers.DomainConfigV3() self._add_resource( mapper, config_controller, path='/domains/{domain_id}/config', get_head_action='get_domain_config', put_action='create_domain_config', patch_action='update_domain_config_only', delete_action='delete_domain_config', rel=json_home.build_v3_resource_relation('domain_config'), status=json_home.Status.EXPERIMENTAL, path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID }) config_group_param = ( json_home.build_v3_parameter_relation('config_group')) self._add_resource( mapper, config_controller, path='/domains/{domain_id}/config/{group}', get_head_action='get_domain_config', patch_action='update_domain_config_group', delete_action='delete_domain_config', rel=json_home.build_v3_resource_relation('domain_config_group'), status=json_home.Status.EXPERIMENTAL, path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group': config_group_param }) self._add_resource( mapper, config_controller, path='/domains/{domain_id}/config/{group}/{option}', get_head_action='get_domain_config', patch_action='update_domain_config', delete_action='delete_domain_config', rel=json_home.build_v3_resource_relation('domain_config_option'), status=json_home.Status.EXPERIMENTAL, 
path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group': config_group_param, 'option': json_home.build_v3_parameter_relation( 'config_option') }) self._add_resource( mapper, config_controller, path='/domains/config/default', get_action='get_domain_config_default', rel=json_home.build_v3_resource_relation('domain_config_default'), status=json_home.Status.EXPERIMENTAL) self._add_resource( mapper, config_controller, path='/domains/config/{group}/default', get_action='get_domain_config_default', rel=json_home.build_v3_resource_relation( 'domain_config_default_group'), status=json_home.Status.EXPERIMENTAL, path_vars={ 'group': config_group_param }) self._add_resource( mapper, config_controller, path='/domains/config/{group}/{option}/default', get_action='get_domain_config_default', rel=json_home.build_v3_resource_relation( 'domain_config_default_option'), status=json_home.Status.EXPERIMENTAL, path_vars={ 'group': config_group_param, 'option': json_home.build_v3_parameter_relation( 'config_option') }) routers.append( router.Router(controllers.ProjectV3(), 'projects', 'project', resource_descriptions=self.v3_resources)) keystone-9.0.0/keystone/resource/V8_backends/0000775000567000056710000000000012701407246022344 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/resource/V8_backends/__init__.py0000664000567000056710000000000012701407102024432 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/resource/V8_backends/sql.py0000664000567000056710000002422512701407102023511 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from keystone.common import clean from keystone.common import driver_hints from keystone.common import sql from keystone import exception from keystone.i18n import _LE from keystone import resource as keystone_resource LOG = log.getLogger(__name__) class Resource(keystone_resource.ResourceDriverV8): def default_assignment_driver(self): return 'sql' def _get_project(self, session, project_id): project_ref = session.query(Project).get(project_id) if project_ref is None: raise exception.ProjectNotFound(project_id=project_id) return project_ref def get_project(self, tenant_id): with sql.session_for_read() as session: return self._get_project(session, tenant_id).to_dict() def get_project_by_name(self, tenant_name, domain_id): with sql.session_for_read() as session: query = session.query(Project) query = query.filter_by(name=tenant_name) query = query.filter_by(domain_id=domain_id) try: project_ref = query.one() except sql.NotFound: raise exception.ProjectNotFound(project_id=tenant_name) return project_ref.to_dict() @driver_hints.truncated def list_projects(self, hints): with sql.session_for_read() as session: query = session.query(Project) project_refs = sql.filter_limit_query(Project, query, hints) return [project_ref.to_dict() for project_ref in project_refs] def list_projects_from_ids(self, ids): if not ids: return [] else: with sql.session_for_read() as session: query = session.query(Project) query = query.filter(Project.id.in_(ids)) return [project_ref.to_dict() for project_ref in query.all()] def list_project_ids_from_domain_ids(self, domain_ids): if not domain_ids: return [] else: with sql.session_for_read() as session: query = session.query(Project.id) query = ( query.filter(Project.domain_id.in_(domain_ids))) return [x.id for x in query.all()] def list_projects_in_domain(self, domain_id): with sql.session_for_read() as session: 
self._get_domain(session, domain_id) query = session.query(Project) project_refs = query.filter_by(domain_id=domain_id) return [project_ref.to_dict() for project_ref in project_refs] def _get_children(self, session, project_ids): query = session.query(Project) query = query.filter(Project.parent_id.in_(project_ids)) project_refs = query.all() return [project_ref.to_dict() for project_ref in project_refs] def list_projects_in_subtree(self, project_id): with sql.session_for_read() as session: children = self._get_children(session, [project_id]) subtree = [] examined = set([project_id]) while children: children_ids = set() for ref in children: if ref['id'] in examined: msg = _LE('Circular reference or a repeated ' 'entry found in projects hierarchy - ' '%(project_id)s.') LOG.error(msg, {'project_id': ref['id']}) return children_ids.add(ref['id']) examined.update(children_ids) subtree += children children = self._get_children(session, children_ids) return subtree def list_project_parents(self, project_id): with sql.session_for_read() as session: project = self._get_project(session, project_id).to_dict() parents = [] examined = set() while project.get('parent_id') is not None: if project['id'] in examined: msg = _LE('Circular reference or a repeated ' 'entry found in projects hierarchy - ' '%(project_id)s.') LOG.error(msg, {'project_id': project['id']}) return examined.add(project['id']) parent_project = self._get_project( session, project['parent_id']).to_dict() parents.append(parent_project) project = parent_project return parents def is_leaf_project(self, project_id): with sql.session_for_read() as session: project_refs = self._get_children(session, [project_id]) return not project_refs # CRUD @sql.handle_conflicts(conflict_type='project') def create_project(self, tenant_id, tenant): tenant['name'] = clean.project_name(tenant['name']) with sql.session_for_write() as session: tenant_ref = Project.from_dict(tenant) session.add(tenant_ref) return tenant_ref.to_dict() 
@sql.handle_conflicts(conflict_type='project') def update_project(self, tenant_id, tenant): if 'name' in tenant: tenant['name'] = clean.project_name(tenant['name']) with sql.session_for_write() as session: tenant_ref = self._get_project(session, tenant_id) old_project_dict = tenant_ref.to_dict() for k in tenant: old_project_dict[k] = tenant[k] new_project = Project.from_dict(old_project_dict) for attr in Project.attributes: if attr != 'id': setattr(tenant_ref, attr, getattr(new_project, attr)) tenant_ref.extra = new_project.extra return tenant_ref.to_dict(include_extra_dict=True) @sql.handle_conflicts(conflict_type='project') def delete_project(self, tenant_id): with sql.session_for_write() as session: tenant_ref = self._get_project(session, tenant_id) session.delete(tenant_ref) # domain crud @sql.handle_conflicts(conflict_type='domain') def create_domain(self, domain_id, domain): with sql.session_for_write() as session: ref = Domain.from_dict(domain) session.add(ref) return ref.to_dict() @driver_hints.truncated def list_domains(self, hints): with sql.session_for_read() as session: query = session.query(Domain) refs = sql.filter_limit_query(Domain, query, hints) return [ref.to_dict() for ref in refs] def list_domains_from_ids(self, ids): if not ids: return [] else: with sql.session_for_read() as session: query = session.query(Domain) query = query.filter(Domain.id.in_(ids)) domain_refs = query.all() return [domain_ref.to_dict() for domain_ref in domain_refs] def _get_domain(self, session, domain_id): ref = session.query(Domain).get(domain_id) if ref is None: raise exception.DomainNotFound(domain_id=domain_id) return ref def get_domain(self, domain_id): with sql.session_for_read() as session: return self._get_domain(session, domain_id).to_dict() def get_domain_by_name(self, domain_name): with sql.session_for_read() as session: try: ref = (session.query(Domain). 
filter_by(name=domain_name).one()) except sql.NotFound: raise exception.DomainNotFound(domain_id=domain_name) return ref.to_dict() @sql.handle_conflicts(conflict_type='domain') def update_domain(self, domain_id, domain): with sql.session_for_write() as session: ref = self._get_domain(session, domain_id) old_dict = ref.to_dict() for k in domain: old_dict[k] = domain[k] new_domain = Domain.from_dict(old_dict) for attr in Domain.attributes: if attr != 'id': setattr(ref, attr, getattr(new_domain, attr)) ref.extra = new_domain.extra return ref.to_dict() def delete_domain(self, domain_id): with sql.session_for_write() as session: ref = self._get_domain(session, domain_id) session.delete(ref) class Domain(sql.ModelBase, sql.DictBase): __tablename__ = 'domain' attributes = ['id', 'name', 'enabled'] id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(64), nullable=False) enabled = sql.Column(sql.Boolean, default=True, nullable=False) extra = sql.Column(sql.JsonBlob()) __table_args__ = (sql.UniqueConstraint('name'),) class Project(sql.ModelBase, sql.DictBase): __tablename__ = 'project' attributes = ['id', 'name', 'domain_id', 'description', 'enabled', 'parent_id', 'is_domain'] id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(64), nullable=False) domain_id = sql.Column(sql.String(64), sql.ForeignKey('domain.id'), nullable=False) description = sql.Column(sql.Text()) enabled = sql.Column(sql.Boolean) extra = sql.Column(sql.JsonBlob()) parent_id = sql.Column(sql.String(64), sql.ForeignKey('project.id')) is_domain = sql.Column(sql.Boolean, default=False, nullable=False, server_default='0') # Unique constraint across two columns to create the separation # rather than just only 'name' being unique __table_args__ = (sql.UniqueConstraint('domain_id', 'name'),) keystone-9.0.0/keystone/identity/0000775000567000056710000000000012701407246020217 5ustar 
jenkinsjenkins00000000000000keystone-9.0.0/keystone/identity/backends/0000775000567000056710000000000012701407246021771 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/identity/backends/__init__.py0000664000567000056710000000000012701407102024057 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/identity/backends/sql.py0000664000567000056710000003674512701407102023150 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy import orm from keystone.common import driver_hints from keystone.common import sql from keystone.common import utils from keystone import exception from keystone.i18n import _ from keystone import identity class User(sql.ModelBase, sql.DictBase): __tablename__ = 'user' attributes = ['id', 'name', 'domain_id', 'password', 'enabled', 'default_project_id'] id = sql.Column(sql.String(64), primary_key=True) enabled = sql.Column(sql.Boolean) extra = sql.Column(sql.JsonBlob()) default_project_id = sql.Column(sql.String(64)) local_user = orm.relationship('LocalUser', uselist=False, single_parent=True, lazy='subquery', cascade='all,delete-orphan', backref='user') federated_users = orm.relationship('FederatedUser', single_parent=True, lazy='subquery', cascade='all,delete-orphan', backref='user') # name property @hybrid_property def name(self): if self.local_user: return self.local_user.name elif self.federated_users: return self.federated_users[0].display_name else: return None @name.setter def name(self, value): if not self.local_user: self.local_user = LocalUser() self.local_user.name = value @name.expression def name(cls): return LocalUser.name # password property @hybrid_property def password(self): if self.local_user and self.local_user.passwords: return self.local_user.passwords[0].password else: return None @password.setter def password(self, value): if not value: if self.local_user and self.local_user.passwords: self.local_user.passwords = [] else: if not self.local_user: self.local_user = LocalUser() if not self.local_user.passwords: self.local_user.passwords.append(Password()) self.local_user.passwords[0].password = value @password.expression def password(cls): return Password.password # domain_id property @hybrid_property def domain_id(self): if self.local_user: return self.local_user.domain_id else: return None @domain_id.setter def domain_id(self, value): if not self.local_user: 
self.local_user = LocalUser() self.local_user.domain_id = value @domain_id.expression def domain_id(cls): return LocalUser.domain_id def to_dict(self, include_extra_dict=False): d = super(User, self).to_dict(include_extra_dict=include_extra_dict) if 'default_project_id' in d and d['default_project_id'] is None: del d['default_project_id'] return d class LocalUser(sql.ModelBase, sql.DictBase): __tablename__ = 'local_user' attributes = ['id', 'user_id', 'domain_id', 'name'] id = sql.Column(sql.Integer, primary_key=True) user_id = sql.Column(sql.String(64), sql.ForeignKey('user.id', ondelete='CASCADE'), unique=True) domain_id = sql.Column(sql.String(64), nullable=False) name = sql.Column(sql.String(255), nullable=False) passwords = orm.relationship('Password', single_parent=True, cascade='all,delete-orphan', backref='local_user') __table_args__ = (sql.UniqueConstraint('domain_id', 'name'), {}) class Password(sql.ModelBase, sql.DictBase): __tablename__ = 'password' attributes = ['id', 'local_user_id', 'password'] id = sql.Column(sql.Integer, primary_key=True) local_user_id = sql.Column(sql.Integer, sql.ForeignKey('local_user.id', ondelete='CASCADE')) password = sql.Column(sql.String(128)) class FederatedUser(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'federated_user' attributes = ['id', 'user_id', 'idp_id', 'protocol_id', 'unique_id', 'display_name'] id = sql.Column(sql.Integer, primary_key=True) user_id = sql.Column(sql.String(64), sql.ForeignKey('user.id', ondelete='CASCADE')) idp_id = sql.Column(sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE')) protocol_id = sql.Column(sql.String(64), nullable=False) unique_id = sql.Column(sql.String(255), nullable=False) display_name = sql.Column(sql.String(255), nullable=True) __table_args__ = ( sql.UniqueConstraint('idp_id', 'protocol_id', 'unique_id'), sqlalchemy.ForeignKeyConstraint(['protocol_id', 'idp_id'], ['federation_protocol.id', 'federation_protocol.idp_id']) ) class 
Group(sql.ModelBase, sql.DictBase): __tablename__ = 'group' attributes = ['id', 'name', 'domain_id', 'description'] id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(64), nullable=False) domain_id = sql.Column(sql.String(64), nullable=False) description = sql.Column(sql.Text()) extra = sql.Column(sql.JsonBlob()) # Unique constraint across two columns to create the separation # rather than just only 'name' being unique __table_args__ = (sql.UniqueConstraint('domain_id', 'name'),) class UserGroupMembership(sql.ModelBase, sql.DictBase): """Group membership join table.""" __tablename__ = 'user_group_membership' user_id = sql.Column(sql.String(64), sql.ForeignKey('user.id'), primary_key=True) group_id = sql.Column(sql.String(64), sql.ForeignKey('group.id'), primary_key=True) class Identity(identity.IdentityDriverV8): # NOTE(henry-nash): Override the __init__() method so as to take a # config parameter to enable sql to be used as a domain-specific driver. def __init__(self, conf=None): self.conf = conf super(Identity, self).__init__() @property def is_sql(self): return True def _check_password(self, password, user_ref): """Check the specified password against the data store. 
Note that we'll pass in the entire user_ref in case the subclass needs things like user_ref.get('name') For further justification, please see the follow up suggestion at https://blueprints.launchpad.net/keystone/+spec/sql-identiy-pam """ return utils.check_password(password, user_ref.password) # Identity interface def authenticate(self, user_id, password): with sql.session_for_read() as session: user_ref = None try: user_ref = self._get_user(session, user_id) except exception.UserNotFound: raise AssertionError(_('Invalid user / password')) if not self._check_password(password, user_ref): raise AssertionError(_('Invalid user / password')) return identity.filter_user(user_ref.to_dict()) # user crud @sql.handle_conflicts(conflict_type='user') def create_user(self, user_id, user): user = utils.hash_user_password(user) with sql.session_for_write() as session: user_ref = User.from_dict(user) session.add(user_ref) return identity.filter_user(user_ref.to_dict()) @driver_hints.truncated def list_users(self, hints): with sql.session_for_read() as session: query = session.query(User).outerjoin(LocalUser) user_refs = sql.filter_limit_query(User, query, hints) return [identity.filter_user(x.to_dict()) for x in user_refs] def _get_user(self, session, user_id): user_ref = session.query(User).get(user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return user_ref def get_user(self, user_id): with sql.session_for_read() as session: return identity.filter_user( self._get_user(session, user_id).to_dict()) def get_user_by_name(self, user_name, domain_id): with sql.session_for_read() as session: query = session.query(User).join(LocalUser) query = query.filter(sqlalchemy.and_(LocalUser.name == user_name, LocalUser.domain_id == domain_id)) try: user_ref = query.one() except sql.NotFound: raise exception.UserNotFound(user_id=user_name) return identity.filter_user(user_ref.to_dict()) @sql.handle_conflicts(conflict_type='user') def update_user(self, user_id, user): with 
sql.session_for_write() as session: user_ref = self._get_user(session, user_id) old_user_dict = user_ref.to_dict() user = utils.hash_user_password(user) for k in user: old_user_dict[k] = user[k] new_user = User.from_dict(old_user_dict) for attr in User.attributes: if attr != 'id': setattr(user_ref, attr, getattr(new_user, attr)) user_ref.extra = new_user.extra return identity.filter_user( user_ref.to_dict(include_extra_dict=True)) def add_user_to_group(self, user_id, group_id): with sql.session_for_write() as session: self.get_group(group_id) self.get_user(user_id) query = session.query(UserGroupMembership) query = query.filter_by(user_id=user_id) query = query.filter_by(group_id=group_id) rv = query.first() if rv: return session.add(UserGroupMembership(user_id=user_id, group_id=group_id)) def check_user_in_group(self, user_id, group_id): with sql.session_for_read() as session: self.get_group(group_id) self.get_user(user_id) query = session.query(UserGroupMembership) query = query.filter_by(user_id=user_id) query = query.filter_by(group_id=group_id) if not query.first(): raise exception.NotFound(_("User '%(user_id)s' not found in" " group '%(group_id)s'") % {'user_id': user_id, 'group_id': group_id}) def remove_user_from_group(self, user_id, group_id): # We don't check if user or group are still valid and let the remove # be tried anyway - in case this is some kind of clean-up operation with sql.session_for_write() as session: query = session.query(UserGroupMembership) query = query.filter_by(user_id=user_id) query = query.filter_by(group_id=group_id) membership_ref = query.first() if membership_ref is None: # Check if the group and user exist to return descriptive # exceptions. 
self.get_group(group_id) self.get_user(user_id) raise exception.NotFound(_("User '%(user_id)s' not found in" " group '%(group_id)s'") % {'user_id': user_id, 'group_id': group_id}) session.delete(membership_ref) def list_groups_for_user(self, user_id, hints): with sql.session_for_read() as session: self.get_user(user_id) query = session.query(Group).join(UserGroupMembership) query = query.filter(UserGroupMembership.user_id == user_id) query = sql.filter_limit_query(Group, query, hints) return [g.to_dict() for g in query] def list_users_in_group(self, group_id, hints): with sql.session_for_read() as session: self.get_group(group_id) query = session.query(User).outerjoin(LocalUser) query = query.join(UserGroupMembership) query = query.filter(UserGroupMembership.group_id == group_id) query = sql.filter_limit_query(User, query, hints) return [identity.filter_user(u.to_dict()) for u in query] def delete_user(self, user_id): with sql.session_for_write() as session: ref = self._get_user(session, user_id) q = session.query(UserGroupMembership) q = q.filter_by(user_id=user_id) q.delete(False) session.delete(ref) # group crud @sql.handle_conflicts(conflict_type='group') def create_group(self, group_id, group): with sql.session_for_write() as session: ref = Group.from_dict(group) session.add(ref) return ref.to_dict() @driver_hints.truncated def list_groups(self, hints): with sql.session_for_read() as session: query = session.query(Group) refs = sql.filter_limit_query(Group, query, hints) return [ref.to_dict() for ref in refs] def _get_group(self, session, group_id): ref = session.query(Group).get(group_id) if not ref: raise exception.GroupNotFound(group_id=group_id) return ref def get_group(self, group_id): with sql.session_for_read() as session: return self._get_group(session, group_id).to_dict() def get_group_by_name(self, group_name, domain_id): with sql.session_for_read() as session: query = session.query(Group) query = query.filter_by(name=group_name) query = 
query.filter_by(domain_id=domain_id) try: group_ref = query.one() except sql.NotFound: raise exception.GroupNotFound(group_id=group_name) return group_ref.to_dict() @sql.handle_conflicts(conflict_type='group') def update_group(self, group_id, group): with sql.session_for_write() as session: ref = self._get_group(session, group_id) old_dict = ref.to_dict() for k in group: old_dict[k] = group[k] new_group = Group.from_dict(old_dict) for attr in Group.attributes: if attr != 'id': setattr(ref, attr, getattr(new_group, attr)) ref.extra = new_group.extra return ref.to_dict() def delete_group(self, group_id): with sql.session_for_write() as session: ref = self._get_group(session, group_id) q = session.query(UserGroupMembership) q = q.filter_by(group_id=group_id) q.delete(False) session.delete(ref) keystone-9.0.0/keystone/identity/backends/ldap.py0000664000567000056710000004034112701407105023257 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from __future__ import absolute_import import uuid import ldap.filter from oslo_config import cfg from oslo_log import log from oslo_log import versionutils import six from keystone.common import clean from keystone.common import driver_hints from keystone.common import ldap as common_ldap from keystone.common import models from keystone import exception from keystone.i18n import _ from keystone import identity CONF = cfg.CONF LOG = log.getLogger(__name__) _DEPRECATION_MSG = _('%s for the LDAP identity backend has been deprecated in ' 'the Mitaka release in favor of read-only identity LDAP ' 'access. It will be removed in the "O" release.') class Identity(identity.IdentityDriverV8): def __init__(self, conf=None): super(Identity, self).__init__() if conf is None: self.conf = CONF else: self.conf = conf self.user = UserApi(self.conf) self.group = GroupApi(self.conf) def is_domain_aware(self): return False def generates_uuids(self): return False # Identity interface def authenticate(self, user_id, password): try: user_ref = self._get_user(user_id) except exception.UserNotFound: raise AssertionError(_('Invalid user / password')) if not user_id or not password: raise AssertionError(_('Invalid user / password')) conn = None try: conn = self.user.get_connection(user_ref['dn'], password, end_user_auth=True) if not conn: raise AssertionError(_('Invalid user / password')) except Exception: raise AssertionError(_('Invalid user / password')) finally: if conn: conn.unbind_s() return self.user.filter_attributes(user_ref) def _get_user(self, user_id): return self.user.get(user_id) def get_user(self, user_id): return self.user.get_filtered(user_id) def list_users(self, hints): return self.user.get_all_filtered(hints) def get_user_by_name(self, user_name, domain_id): # domain_id will already have been handled in the Manager layer, # parameter left in so this matches the Driver specification return self.user.filter_attributes(self.user.get_by_name(user_name)) # CRUD def 
create_user(self, user_id, user): msg = _DEPRECATION_MSG % "create_user" versionutils.report_deprecated_feature(LOG, msg) self.user.check_allow_create() user_ref = self.user.create(user) return self.user.filter_attributes(user_ref) def update_user(self, user_id, user): msg = _DEPRECATION_MSG % "update_user" versionutils.report_deprecated_feature(LOG, msg) self.user.check_allow_update() old_obj = self.user.get(user_id) if 'name' in user and old_obj.get('name') != user['name']: raise exception.Conflict(_('Cannot change user name')) if self.user.enabled_mask: self.user.mask_enabled_attribute(user) elif self.user.enabled_invert and not self.user.enabled_emulation: # We need to invert the enabled value for the old model object # to prevent the LDAP update code from thinking that the enabled # values are already equal. user['enabled'] = not user['enabled'] old_obj['enabled'] = not old_obj['enabled'] self.user.update(user_id, user, old_obj) return self.user.get_filtered(user_id) def delete_user(self, user_id): msg = _DEPRECATION_MSG % "delete_user" versionutils.report_deprecated_feature(LOG, msg) self.user.check_allow_delete() user = self.user.get(user_id) user_dn = user['dn'] groups = self.group.list_user_groups(user_dn) for group in groups: self.group.remove_user(user_dn, group['id'], user_id) if hasattr(user, 'tenant_id'): self.project.remove_user(user.tenant_id, user_dn) self.user.delete(user_id) def create_group(self, group_id, group): msg = _DEPRECATION_MSG % "create_group" versionutils.report_deprecated_feature(LOG, msg) self.group.check_allow_create() group['name'] = clean.group_name(group['name']) return common_ldap.filter_entity(self.group.create(group)) def get_group(self, group_id): return self.group.get_filtered(group_id) def get_group_by_name(self, group_name, domain_id): # domain_id will already have been handled in the Manager layer, # parameter left in so this matches the Driver specification return self.group.get_filtered_by_name(group_name) def 
update_group(self, group_id, group): msg = _DEPRECATION_MSG % "update_group" versionutils.report_deprecated_feature(LOG, msg) self.group.check_allow_update() if 'name' in group: group['name'] = clean.group_name(group['name']) return common_ldap.filter_entity(self.group.update(group_id, group)) def delete_group(self, group_id): msg = _DEPRECATION_MSG % "delete_group" versionutils.report_deprecated_feature(LOG, msg) self.group.check_allow_delete() return self.group.delete(group_id) def add_user_to_group(self, user_id, group_id): msg = _DEPRECATION_MSG % "add_user_to_group" versionutils.report_deprecated_feature(LOG, msg) user_ref = self._get_user(user_id) user_dn = user_ref['dn'] self.group.add_user(user_dn, group_id, user_id) def remove_user_from_group(self, user_id, group_id): msg = _DEPRECATION_MSG % "remove_user_from_group" versionutils.report_deprecated_feature(LOG, msg) user_ref = self._get_user(user_id) user_dn = user_ref['dn'] self.group.remove_user(user_dn, group_id, user_id) def list_groups_for_user(self, user_id, hints): user_ref = self._get_user(user_id) if self.conf.ldap.group_members_are_ids: user_dn = user_ref['id'] else: user_dn = user_ref['dn'] return self.group.list_user_groups_filtered(user_dn, hints) def list_groups(self, hints): return self.group.get_all_filtered(hints) def list_users_in_group(self, group_id, hints): users = [] for user_key in self.group.list_group_users(group_id): if self.conf.ldap.group_members_are_ids: user_id = user_key else: user_id = self.user._dn_to_id(user_key) try: users.append(self.user.get_filtered(user_id)) except exception.UserNotFound: LOG.debug(("Group member '%(user_key)s' not found in" " '%(group_id)s'. The user should be removed" " from the group. 
The user will be ignored."), dict(user_key=user_key, group_id=group_id)) return users def check_user_in_group(self, user_id, group_id): user_refs = self.list_users_in_group(group_id, driver_hints.Hints()) for x in user_refs: if x['id'] == user_id: break else: # Try to fetch the user to see if it even exists. This # will raise a more accurate exception. self.get_user(user_id) raise exception.NotFound(_("User '%(user_id)s' not found in" " group '%(group_id)s'") % {'user_id': user_id, 'group_id': group_id}) # TODO(termie): turn this into a data object and move logic to driver class UserApi(common_ldap.EnabledEmuMixIn, common_ldap.BaseLdap): DEFAULT_OU = 'ou=Users' DEFAULT_STRUCTURAL_CLASSES = ['person'] DEFAULT_ID_ATTR = 'cn' DEFAULT_OBJECTCLASS = 'inetOrgPerson' NotFound = exception.UserNotFound options_name = 'user' attribute_options_names = {'password': 'pass', 'email': 'mail', 'name': 'name', 'description': 'description', 'enabled': 'enabled', 'default_project_id': 'default_project_id'} immutable_attrs = ['id'] model = models.User def __init__(self, conf): super(UserApi, self).__init__(conf) self.enabled_mask = conf.ldap.user_enabled_mask self.enabled_default = conf.ldap.user_enabled_default self.enabled_invert = conf.ldap.user_enabled_invert self.enabled_emulation = conf.ldap.user_enabled_emulation def _ldap_res_to_model(self, res): obj = super(UserApi, self)._ldap_res_to_model(res) if self.enabled_mask != 0: enabled = int(obj.get('enabled', self.enabled_default)) obj['enabled'] = ((enabled & self.enabled_mask) != self.enabled_mask) elif self.enabled_invert and not self.enabled_emulation: # This could be a bool or a string. If it's a string, # we need to convert it so we can invert it properly. 
enabled = obj.get('enabled', self.enabled_default) if isinstance(enabled, six.string_types): if enabled.lower() == 'true': enabled = True else: enabled = False obj['enabled'] = not enabled obj['dn'] = res[0] return obj def mask_enabled_attribute(self, values): value = values['enabled'] values.setdefault('enabled_nomask', int(self.enabled_default)) if value != ((values['enabled_nomask'] & self.enabled_mask) != self.enabled_mask): values['enabled_nomask'] ^= self.enabled_mask values['enabled'] = values['enabled_nomask'] del values['enabled_nomask'] def create(self, values): if self.enabled_mask: orig_enabled = values['enabled'] self.mask_enabled_attribute(values) elif self.enabled_invert and not self.enabled_emulation: orig_enabled = values['enabled'] if orig_enabled is not None: values['enabled'] = not orig_enabled else: values['enabled'] = self.enabled_default values = super(UserApi, self).create(values) if self.enabled_mask or (self.enabled_invert and not self.enabled_emulation): values['enabled'] = orig_enabled return values def get_filtered(self, user_id): user = self.get(user_id) return self.filter_attributes(user) def get_all_filtered(self, hints): query = self.filter_query(hints) return [self.filter_attributes(user) for user in self.get_all(query, hints)] def filter_attributes(self, user): return identity.filter_user(common_ldap.filter_entity(user)) def is_user(self, dn): """Returns True if the entry is a user.""" # NOTE(blk-u): It's easy to check if the DN is under the User tree, # but may not be accurate. A more accurate test would be to fetch the # entry to see if it's got the user objectclass, but this could be # really expensive considering how this is used. 
return common_ldap.dn_startswith(dn, self.tree_dn) class GroupApi(common_ldap.BaseLdap): DEFAULT_OU = 'ou=UserGroups' DEFAULT_STRUCTURAL_CLASSES = [] DEFAULT_OBJECTCLASS = 'groupOfNames' DEFAULT_ID_ATTR = 'cn' DEFAULT_MEMBER_ATTRIBUTE = 'member' NotFound = exception.GroupNotFound options_name = 'group' attribute_options_names = {'description': 'desc', 'name': 'name'} immutable_attrs = ['name'] model = models.Group def _ldap_res_to_model(self, res): model = super(GroupApi, self)._ldap_res_to_model(res) model['dn'] = res[0] return model def __init__(self, conf): super(GroupApi, self).__init__(conf) self.member_attribute = (conf.ldap.group_member_attribute or self.DEFAULT_MEMBER_ATTRIBUTE) def create(self, values): data = values.copy() if data.get('id') is None: data['id'] = uuid.uuid4().hex if 'description' in data and data['description'] in ['', None]: data.pop('description') return super(GroupApi, self).create(data) def delete(self, group_id): if self.subtree_delete_enabled: super(GroupApi, self).delete_tree(group_id) else: # TODO(spzala): this is only placeholder for group and domain # role support which will be added under bug 1101287 group_ref = self.get(group_id) group_dn = group_ref['dn'] if group_dn: self._delete_tree_nodes(group_dn, ldap.SCOPE_ONELEVEL) super(GroupApi, self).delete(group_id) def update(self, group_id, values): old_obj = self.get(group_id) return super(GroupApi, self).update(group_id, values, old_obj) def add_user(self, user_dn, group_id, user_id): group_ref = self.get(group_id) group_dn = group_ref['dn'] try: super(GroupApi, self).add_member(user_dn, group_dn) except exception.Conflict: raise exception.Conflict(_( 'User %(user_id)s is already a member of group %(group_id)s') % {'user_id': user_id, 'group_id': group_id}) def remove_user(self, user_dn, group_id, user_id): group_ref = self.get(group_id) group_dn = group_ref['dn'] try: super(GroupApi, self).remove_member(user_dn, group_dn) except ldap.NO_SUCH_ATTRIBUTE: raise 
exception.UserNotFound(user_id=user_id) def list_user_groups(self, user_dn): """Return a list of groups for which the user is a member.""" user_dn_esc = ldap.filter.escape_filter_chars(user_dn) query = '(%s=%s)%s' % (self.member_attribute, user_dn_esc, self.ldap_filter or '') return self.get_all(query) def list_user_groups_filtered(self, user_dn, hints): """Return a filtered list of groups for which the user is a member.""" user_dn_esc = ldap.filter.escape_filter_chars(user_dn) query = '(%s=%s)%s' % (self.member_attribute, user_dn_esc, self.ldap_filter or '') return self.get_all_filtered(hints, query) def list_group_users(self, group_id): """Return a list of user dns which are members of a group.""" group_ref = self.get(group_id) group_dn = group_ref['dn'] try: attrs = self._ldap_get_list(group_dn, ldap.SCOPE_BASE, attrlist=[self.member_attribute]) except ldap.NO_SUCH_OBJECT: raise self.NotFound(group_id=group_id) users = [] for dn, member in attrs: user_dns = member.get(self.member_attribute, []) for user_dn in user_dns: if self._is_dumb_member(user_dn): continue users.append(user_dn) return users def get_filtered(self, group_id): group = self.get(group_id) return common_ldap.filter_entity(group) def get_filtered_by_name(self, group_name): group = self.get_by_name(group_name) return common_ldap.filter_entity(group) def get_all_filtered(self, hints, query=None): query = self.filter_query(hints, query) return [common_ldap.filter_entity(group) for group in self.get_all(query, hints)] keystone-9.0.0/keystone/identity/shadow_backends/0000775000567000056710000000000012701407246023336 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/identity/shadow_backends/__init__.py0000664000567000056710000000000012701407102025424 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/identity/shadow_backends/sql.py0000664000567000056710000000623212701407102024501 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may 
# not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from keystone.common import sql from keystone import exception from keystone import identity from keystone.identity.backends import sql as model class ShadowUsers(identity.ShadowUsersDriverV9): @sql.handle_conflicts(conflict_type='federated_user') def create_federated_user(self, federated_dict): user = { 'id': uuid.uuid4().hex, 'enabled': True } with sql.session_for_write() as session: federated_ref = model.FederatedUser.from_dict(federated_dict) user_ref = model.User.from_dict(user) user_ref.federated_users.append(federated_ref) session.add(user_ref) return identity.filter_user(user_ref.to_dict()) def get_federated_user(self, idp_id, protocol_id, unique_id): user_ref = self._get_federated_user(idp_id, protocol_id, unique_id) return identity.filter_user(user_ref.to_dict()) def _get_federated_user(self, idp_id, protocol_id, unique_id): """Returns the found user for the federated identity :param idp_id: The identity provider ID :param protocol_id: The federation protocol ID :param unique_id: The user's unique ID (unique within the IdP) :returns User: Returns a reference to the User """ with sql.session_for_read() as session: query = session.query(model.User).outerjoin(model.LocalUser) query = query.join(model.FederatedUser) query = query.filter(model.FederatedUser.idp_id == idp_id) query = query.filter(model.FederatedUser.protocol_id == protocol_id) query = query.filter(model.FederatedUser.unique_id == unique_id) try: user_ref = query.one() except sql.NotFound: raise 
exception.UserNotFound(user_id=unique_id) return user_ref @sql.handle_conflicts(conflict_type='federated_user') def update_federated_user_display_name(self, idp_id, protocol_id, unique_id, display_name): with sql.session_for_write() as session: query = session.query(model.FederatedUser) query = query.filter(model.FederatedUser.idp_id == idp_id) query = query.filter(model.FederatedUser.protocol_id == protocol_id) query = query.filter(model.FederatedUser.unique_id == unique_id) query = query.filter(model.FederatedUser.display_name != display_name) query.update({'display_name': display_name}) return keystone-9.0.0/keystone/identity/schema.py0000664000567000056710000000352512701407105022030 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import validation from keystone.common.validation import parameter_types # NOTE(lhcheng): the max length is not applicable since it is specific # to the SQL backend, LDAP does not have length limitation. 
_identity_name = { 'type': 'string', 'minLength': 1 } _user_properties = { 'default_project_id': validation.nullable(parameter_types.id_string), 'description': validation.nullable(parameter_types.description), 'domain_id': parameter_types.id_string, 'enabled': parameter_types.boolean, 'name': _identity_name, 'password': { 'type': ['string', 'null'] } } user_create = { 'type': 'object', 'properties': _user_properties, 'required': ['name'], 'additionalProperties': True } user_update = { 'type': 'object', 'properties': _user_properties, 'minProperties': 1, 'additionalProperties': True } _group_properties = { 'description': validation.nullable(parameter_types.description), 'domain_id': parameter_types.id_string, 'name': _identity_name } group_create = { 'type': 'object', 'properties': _group_properties, 'required': ['name'], 'additionalProperties': True } group_update = { 'type': 'object', 'properties': _group_properties, 'minProperties': 1, 'additionalProperties': True } keystone-9.0.0/keystone/identity/__init__.py0000664000567000056710000000133112701407102022315 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.identity import controllers # noqa from keystone.identity.core import * # noqa from keystone.identity import generator # noqa keystone-9.0.0/keystone/identity/core.py0000664000567000056710000020302512701407105021515 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Identity service.""" import abc import functools import os import threading import uuid from oslo_config import cfg from oslo_log import log from oslo_log import versionutils import six from keystone import assignment # TODO(lbragstad): Decouple this dependency from keystone.common import cache from keystone.common import clean from keystone.common import config from keystone.common import dependency from keystone.common import driver_hints from keystone.common import manager from keystone import exception from keystone.i18n import _, _LW from keystone.identity.mapping_backends import mapping from keystone import notifications CONF = cfg.CONF LOG = log.getLogger(__name__) MEMOIZE = cache.get_memoization_decorator(group='identity') DOMAIN_CONF_FHEAD = 'keystone.' DOMAIN_CONF_FTAIL = '.conf' # The number of times we will attempt to register a domain to use the SQL # driver, if we find that another process is in the middle of registering or # releasing at the same time as us. 
REGISTRATION_ATTEMPTS = 10 # Config Registration Types SQL_DRIVER = 'SQL' def filter_user(user_ref): """Filter out private items in a user dict. 'password', 'tenants' and 'groups' are never returned. :returns: user_ref """ if user_ref: user_ref = user_ref.copy() user_ref.pop('password', None) user_ref.pop('tenants', None) user_ref.pop('groups', None) user_ref.pop('domains', None) try: user_ref['extra'].pop('password', None) user_ref['extra'].pop('tenants', None) except KeyError: # nosec # ok to not have extra in the user_ref. pass return user_ref @dependency.requires('domain_config_api', 'resource_api') class DomainConfigs(dict): """Discover, store and provide access to domain specific configs. The setup_domain_drivers() call will be made via the wrapper from the first call to any driver function handled by this manager. Domain specific configurations are only supported for the identity backend and the individual configurations are either specified in the resource database or in individual domain configuration files, depending on the setting of the 'domain_configurations_from_database' config option. The result will be that for each domain with a specific configuration, this class will hold a reference to a ConfigOpts and driver object that the identity manager and driver can use. """ configured = False driver = None _any_sql = False lock = threading.Lock() def _load_driver(self, domain_config): return manager.load_driver(Manager.driver_namespace, domain_config['cfg'].identity.driver, domain_config['cfg']) def _load_config_from_file(self, resource_api, file_list, domain_name): def _assert_no_more_than_one_sql_driver(domain_id, new_config, config_file): """Ensure there is no more than one sql driver. Check to see if the addition of the driver in this new config would cause there to be more than one sql driver. 
""" if (new_config['driver'].is_sql and (self.driver.is_sql or self._any_sql)): # The addition of this driver would cause us to have more than # one sql driver, so raise an exception. raise exception.MultipleSQLDriversInConfig(source=config_file) self._any_sql = self._any_sql or new_config['driver'].is_sql try: domain_ref = resource_api.get_domain_by_name(domain_name) except exception.DomainNotFound: LOG.warning( _LW('Invalid domain name (%s) found in config file name'), domain_name) return # Create a new entry in the domain config dict, which contains # a new instance of both the conf environment and driver using # options defined in this set of config files. Later, when we # service calls via this Manager, we'll index via this domain # config dict to make sure we call the right driver domain_config = {} domain_config['cfg'] = cfg.ConfigOpts() config.configure(conf=domain_config['cfg']) domain_config['cfg'](args=[], project='keystone', default_config_files=file_list) domain_config['driver'] = self._load_driver(domain_config) _assert_no_more_than_one_sql_driver(domain_ref['id'], domain_config, file_list) self[domain_ref['id']] = domain_config def _setup_domain_drivers_from_files(self, standard_driver, resource_api): """Read the domain specific configuration files and load the drivers. 
Domain configuration files are stored in the domain config directory, and must be named of the form: keystone..conf For each file, call the load config method where the domain_name will be turned into a domain_id and then: - Create a new config structure, adding in the specific additional options defined in this config file - Initialise a new instance of the required driver with this new config """ conf_dir = CONF.identity.domain_config_dir if not os.path.exists(conf_dir): LOG.warning(_LW('Unable to locate domain config directory: %s'), conf_dir) return for r, d, f in os.walk(conf_dir): for fname in f: if (fname.startswith(DOMAIN_CONF_FHEAD) and fname.endswith(DOMAIN_CONF_FTAIL)): if fname.count('.') >= 2: self._load_config_from_file( resource_api, [os.path.join(r, fname)], fname[len(DOMAIN_CONF_FHEAD): -len(DOMAIN_CONF_FTAIL)]) else: LOG.debug(('Ignoring file (%s) while scanning domain ' 'config directory'), fname) def _load_config_from_database(self, domain_id, specific_config): def _assert_no_more_than_one_sql_driver(domain_id, new_config): """Ensure adding driver doesn't push us over the limit of 1 The checks we make in this method need to take into account that we may be in a multiple process configuration and ensure that any race conditions are avoided. """ if not new_config['driver'].is_sql: self.domain_config_api.release_registration(domain_id) return # To ensure the current domain is the only SQL driver, we attempt # to register our use of SQL. If we get it we know we are good, # if we fail to register it then we should: # # - First check if another process has registered for SQL for our # domain, in which case we are fine # - If a different domain has it, we should check that this domain # is still valid, in case, for example, domain deletion somehow # failed to remove its registration (i.e. we self heal for these # kinds of issues). 
domain_registered = 'Unknown' for attempt in range(REGISTRATION_ATTEMPTS): if self.domain_config_api.obtain_registration( domain_id, SQL_DRIVER): LOG.debug('Domain %s successfully registered to use the ' 'SQL driver.', domain_id) return # We failed to register our use, let's find out who is using it try: domain_registered = ( self.domain_config_api.read_registration( SQL_DRIVER)) except exception.ConfigRegistrationNotFound: msg = ('While attempting to register domain %(domain)s to ' 'use the SQL driver, another process released it, ' 'retrying (attempt %(attempt)s).') LOG.debug(msg, {'domain': domain_id, 'attempt': attempt + 1}) continue if domain_registered == domain_id: # Another process already registered it for us, so we are # fine. In the race condition when another process is # in the middle of deleting this domain, we know the domain # is already disabled and hence telling the caller that we # are registered is benign. LOG.debug('While attempting to register domain %s to use ' 'the SQL driver, found that another process had ' 'already registered this domain. This is normal ' 'in multi-process configurations.', domain_id) return # So we don't have it, but someone else does...let's check that # this domain is still valid try: self.resource_api.get_domain(domain_registered) except exception.DomainNotFound: msg = ('While attempting to register domain %(domain)s to ' 'use the SQL driver, found that it was already ' 'registered to a domain that no longer exists ' '(%(old_domain)s). Removing this stale ' 'registration and retrying (attempt %(attempt)s).') LOG.debug(msg, {'domain': domain_id, 'old_domain': domain_registered, 'attempt': attempt + 1}) self.domain_config_api.release_registration( domain_registered, type=SQL_DRIVER) continue # The domain is valid, so we really do have an attempt at more # than one SQL driver. 
details = ( _('Config API entity at /domains/%s/config') % domain_id) raise exception.MultipleSQLDriversInConfig(source=details) # We fell out of the loop without either registering our domain or # being able to find who has it...either we were very very very # unlucky or something is awry. msg = _('Exceeded attempts to register domain %(domain)s to use ' 'the SQL driver, the last domain that appears to have ' 'had it is %(last_domain)s, giving up') % { 'domain': domain_id, 'last_domain': domain_registered} raise exception.UnexpectedError(msg) domain_config = {} domain_config['cfg'] = cfg.ConfigOpts() config.configure(conf=domain_config['cfg']) domain_config['cfg'](args=[], project='keystone', default_config_files=[]) # Override any options that have been passed in as specified in the # database. for group in specific_config: for option in specific_config[group]: domain_config['cfg'].set_override( option, specific_config[group][option], group, enforce_type=True) domain_config['cfg_overrides'] = specific_config domain_config['driver'] = self._load_driver(domain_config) _assert_no_more_than_one_sql_driver(domain_id, domain_config) self[domain_id] = domain_config def _setup_domain_drivers_from_database(self, standard_driver, resource_api): """Read domain specific configuration from database and load drivers. Domain configurations are stored in the domain-config backend, so we go through each domain to find those that have a specific config defined, and for those that do we: - Create a new config structure, overriding any specific options defined in the resource backend - Initialise a new instance of the required driver with this new config """ for domain in resource_api.list_domains(): domain_config_options = ( self.domain_config_api. 
get_config_with_sensitive_info(domain['id'])) if domain_config_options: self._load_config_from_database(domain['id'], domain_config_options) def setup_domain_drivers(self, standard_driver, resource_api): # This is called by the api call wrapper self.driver = standard_driver if CONF.identity.domain_configurations_from_database: self._setup_domain_drivers_from_database(standard_driver, resource_api) else: self._setup_domain_drivers_from_files(standard_driver, resource_api) self.configured = True def get_domain_driver(self, domain_id): self.check_config_and_reload_domain_driver_if_required(domain_id) if domain_id in self: return self[domain_id]['driver'] def get_domain_conf(self, domain_id): self.check_config_and_reload_domain_driver_if_required(domain_id) if domain_id in self: return self[domain_id]['cfg'] else: return CONF def reload_domain_driver(self, domain_id): # Only used to support unit tests that want to set # new config values. This should only be called once # the domains have been configured, since it relies on # the fact that the configuration files/database have already been # read. if self.configured: if domain_id in self: self[domain_id]['driver'] = ( self._load_driver(self[domain_id])) else: # The standard driver self.driver = self.driver() def check_config_and_reload_domain_driver_if_required(self, domain_id): """Check for, and load, any new domain specific config for this domain. This is only supported for the database-stored domain specific configuration. When the domain specific drivers were set up, we stored away the specific config for this domain that was available at that time. So we now read the current version and compare. While this might seem somewhat inefficient, the sensitive config call is cached, so should be light weight. More importantly, when the cache timeout is reached, we will get any config that has been updated from any other keystone process. 
This cache-timeout approach works for both multi-process and multi-threaded keystone configurations. In multi-threaded configurations, even though we might remove a driver object (that could be in use by another thread), this won't actually be thrown away until all references to it have been broken. When that other thread is released back and is restarted with another command to process, next time it accesses the driver it will pickup the new one. """ if (not CONF.identity.domain_specific_drivers_enabled or not CONF.identity.domain_configurations_from_database): # If specific drivers are not enabled, then there is nothing to do. # If we are not storing the configurations in the database, then # we'll only re-read the domain specific config files on startup # of keystone. return latest_domain_config = ( self.domain_config_api. get_config_with_sensitive_info(domain_id)) domain_config_in_use = domain_id in self if latest_domain_config: if (not domain_config_in_use or latest_domain_config != self[domain_id]['cfg_overrides']): self._load_config_from_database(domain_id, latest_domain_config) elif domain_config_in_use: # The domain specific config has been deleted, so should remove the # specific driver for this domain. try: del self[domain_id] except KeyError: # nosec # Allow this error in case we are unlucky and in a # multi-threaded situation, two threads happen to be running # in lock step. pass # If we fall into the else condition, this means there is no domain # config set, and there is none in use either, so we have nothing # to do. def domains_configured(f): """Wraps API calls to lazy load domain configs after init. This is required since the assignment manager needs to be initialized before this manager, and yet this manager's init wants to be able to make assignment calls (to build the domain configs). 
So instead, we check if the domains have been initialized on entry to each call, and if requires load them, """ @functools.wraps(f) def wrapper(self, *args, **kwargs): if (not self.domain_configs.configured and CONF.identity.domain_specific_drivers_enabled): # If domain specific driver has not been configured, acquire the # lock and proceed with loading the driver. with self.domain_configs.lock: # Check again just in case some other thread has already # completed domain config. if not self.domain_configs.configured: self.domain_configs.setup_domain_drivers( self.driver, self.resource_api) return f(self, *args, **kwargs) return wrapper def exception_translated(exception_type): """Wraps API calls to map to correct exception.""" def _exception_translated(f): @functools.wraps(f) def wrapper(self, *args, **kwargs): try: return f(self, *args, **kwargs) except exception.PublicIDNotFound as e: if exception_type == 'user': raise exception.UserNotFound(user_id=str(e)) elif exception_type == 'group': raise exception.GroupNotFound(group_id=str(e)) elif exception_type == 'assertion': raise AssertionError(_('Invalid user / password')) else: raise return wrapper return _exception_translated @notifications.listener @dependency.provider('identity_api') @dependency.requires('assignment_api', 'credential_api', 'id_mapping_api', 'resource_api', 'revoke_api', 'shadow_users_api') class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. This class also handles the support of domain specific backends, by using the DomainConfigs class. The setup call for DomainConfigs is called from with the @domains_configured wrapper in a lazy loading fashion to get around the fact that we can't satisfy the assignment api it needs from within our __init__() function since the assignment driver is not itself yet initialized. 
Each of the identity calls are pre-processed here to choose, based on domain, which of the drivers should be called. The non-domain-specific driver is still in place, and is used if there is no specific driver for the domain in question (or we are not using multiple domain drivers). Starting with Juno, in order to be able to obtain the domain from just an ID being presented as part of an API call, a public ID to domain and local ID mapping is maintained. This mapping also allows for the local ID of drivers that do not provide simple UUIDs (such as LDAP) to be referenced via a public facing ID. The mapping itself is automatically generated as entities are accessed via the driver. This mapping is only used when: - the entity is being handled by anything other than the default driver, or - the entity is being handled by the default LDAP driver and backward compatible IDs are not required. This means that in the standard case of a single SQL backend or the default settings of a single LDAP backend (since backward compatible IDs is set to True by default), no mapping is used. An alternative approach would be to always use the mapping table, but in the cases where we don't need it to make the public and local IDs the same. It is felt that not using the mapping by default is a more prudent way to introduce this functionality. """ driver_namespace = 'keystone.identity' _USER = 'user' _GROUP = 'group' def __init__(self): super(Manager, self).__init__(CONF.identity.driver) self.domain_configs = DomainConfigs() self.event_callbacks = { notifications.ACTIONS.deleted: { 'domain': [self._domain_deleted], }, } def _domain_deleted(self, service, resource_type, operation, payload): domain_id = payload['resource_info'] user_refs = self.list_users(domain_scope=domain_id) group_refs = self.list_groups(domain_scope=domain_id) for group in group_refs: # Cleanup any existing groups. 
try: self.delete_group(group['id']) except exception.GroupNotFound: LOG.debug(('Group %(groupid)s not found when deleting domain ' 'contents for %(domainid)s, continuing with ' 'cleanup.'), {'groupid': group['id'], 'domainid': domain_id}) # And finally, delete the users themselves for user in user_refs: try: self.delete_user(user['id']) except exception.UserNotFound: LOG.debug(('User %(userid)s not found when deleting domain ' 'contents for %(domainid)s, continuing with ' 'cleanup.'), {'userid': user['id'], 'domainid': domain_id}) # Domain ID normalization methods def _set_domain_id_and_mapping(self, ref, domain_id, driver, entity_type): """Patch the domain_id/public_id into the resulting entity(ies). :param ref: the entity or list of entities to post process :param domain_id: the domain scope used for the call :param driver: the driver used to execute the call :param entity_type: whether this is a user or group :returns: post processed entity or list or entities Called to post-process the entity being returned, using a mapping to substitute a public facing ID as necessary. This method must take into account: - If the driver is not domain aware, then we must set the domain attribute of all entities irrespective of mapping. - If the driver does not support UUIDs, then we always want to provide a mapping, except for the special case of this being the default driver and backward_compatible_ids is set to True. This is to ensure that entity IDs do not change for an existing LDAP installation (only single domain/driver LDAP configurations were previously supported). - If the driver does support UUIDs, then we always create a mapping entry, but use the local UUID as the public ID. The exception to - this is that if we just have single driver (i.e. not using specific multi-domain configs), then we don't both with the mapping at all. 
""" conf = CONF.identity if not self._needs_post_processing(driver): # a classic case would be when running with a single SQL driver return ref LOG.debug('ID Mapping - Domain ID: %(domain)s, ' 'Default Driver: %(driver)s, ' 'Domains: %(aware)s, UUIDs: %(generate)s, ' 'Compatible IDs: %(compat)s', {'domain': domain_id, 'driver': (driver == self.driver), 'aware': driver.is_domain_aware(), 'generate': driver.generates_uuids(), 'compat': CONF.identity_mapping.backward_compatible_ids}) if isinstance(ref, dict): return self._set_domain_id_and_mapping_for_single_ref( ref, domain_id, driver, entity_type, conf) elif isinstance(ref, list): return [self._set_domain_id_and_mapping( x, domain_id, driver, entity_type) for x in ref] else: raise ValueError(_('Expected dict or list: %s') % type(ref)) def _needs_post_processing(self, driver): """Returns whether entity from driver needs domain added or mapping.""" return (driver is not self.driver or not driver.generates_uuids() or not driver.is_domain_aware()) def _set_domain_id_and_mapping_for_single_ref(self, ref, domain_id, driver, entity_type, conf): LOG.debug('Local ID: %s', ref['id']) ref = ref.copy() self._insert_domain_id_if_needed(ref, driver, domain_id, conf) if self._is_mapping_needed(driver): local_entity = {'domain_id': ref['domain_id'], 'local_id': ref['id'], 'entity_type': entity_type} public_id = self.id_mapping_api.get_public_id(local_entity) if public_id: ref['id'] = public_id LOG.debug('Found existing mapping to public ID: %s', ref['id']) else: # Need to create a mapping. If the driver generates UUIDs # then pass the local UUID in as the public ID to use. if driver.generates_uuids(): public_id = ref['id'] ref['id'] = self.id_mapping_api.create_id_mapping( local_entity, public_id) LOG.debug('Created new mapping to public ID: %s', ref['id']) return ref def _insert_domain_id_if_needed(self, ref, driver, domain_id, conf): """Inserts the domain ID into the ref, if required. 
If the driver can't handle domains, then we need to insert the domain_id into the entity being returned. If the domain_id is None that means we are running in a single backend mode, so to remain backwardly compatible, we put in the default domain ID. """ if not driver.is_domain_aware(): if domain_id is None: domain_id = conf.default_domain_id ref['domain_id'] = domain_id def _is_mapping_needed(self, driver): """Returns whether mapping is needed. There are two situations where we must use the mapping: - this isn't the default driver (i.e. multiple backends), or - we have a single backend that doesn't use UUIDs The exception to the above is that we must honor backward compatibility if this is the default driver (e.g. to support current LDAP) """ is_not_default_driver = driver is not self.driver return (is_not_default_driver or ( not driver.generates_uuids() and not CONF.identity_mapping.backward_compatible_ids)) def _clear_domain_id_if_domain_unaware(self, driver, ref): """Clear domain_id details if driver is not domain aware.""" if not driver.is_domain_aware() and 'domain_id' in ref: ref = ref.copy() ref.pop('domain_id') return ref def _select_identity_driver(self, domain_id): """Choose a backend driver for the given domain_id. :param domain_id: The domain_id for which we want to find a driver. If the domain_id is specified as None, then this means we need a driver that handles multiple domains. :returns: chosen backend driver If there is a specific driver defined for this domain then choose it. If the domain is None, or there no specific backend for the given domain is found, then we chose the default driver. """ if domain_id is None: driver = self.driver else: driver = (self.domain_configs.get_domain_driver(domain_id) or self.driver) # If the driver is not domain aware (e.g. 
LDAP) then check to # ensure we are not mapping multiple domains onto it - the only way # that would happen is that the default driver is LDAP and the # domain is anything other than None or the default domain. if (not driver.is_domain_aware() and driver == self.driver and domain_id != CONF.identity.default_domain_id and domain_id is not None): LOG.warning(_LW('Found multiple domains being mapped to a ' 'driver that does not support that (e.g. ' 'LDAP) - Domain ID: %(domain)s, ' 'Default Driver: %(driver)s'), {'domain': domain_id, 'driver': (driver == self.driver)}) raise exception.DomainNotFound(domain_id=domain_id) return driver def _get_domain_driver_and_entity_id(self, public_id): """Look up details using the public ID. :param public_id: the ID provided in the call :returns: domain_id, which can be None to indicate that the driver in question supports multiple domains driver selected based on this domain entity_id which will is understood by the driver. Use the mapping table to look up the domain, driver and local entity that is represented by the provided public ID. Handle the situations where we do not use the mapping (e.g. single driver that understands UUIDs etc.) """ conf = CONF.identity # First, since we don't know anything about the entity yet, we must # assume it needs mapping, so long as we are using domain specific # drivers. if conf.domain_specific_drivers_enabled: local_id_ref = self.id_mapping_api.get_id_mapping(public_id) if local_id_ref: return ( local_id_ref['domain_id'], self._select_identity_driver(local_id_ref['domain_id']), local_id_ref['local_id']) # So either we are using multiple drivers but the public ID is invalid # (and hence was not found in the mapping table), or the public ID is # being handled by the default driver. Either way, the only place left # to look is in that standard driver. However, we don't yet know if # this driver also needs mapping (e.g. LDAP in non backward # compatibility mode). 
driver = self.driver if driver.generates_uuids(): if driver.is_domain_aware: # No mapping required, and the driver can handle the domain # information itself. The classic case of this is the # current SQL driver. return (None, driver, public_id) else: # Although we don't have any drivers of this type, i.e. that # understand UUIDs but not domains, conceptually you could. return (conf.default_domain_id, driver, public_id) # So the only place left to find the ID is in the default driver which # we now know doesn't generate UUIDs if not CONF.identity_mapping.backward_compatible_ids: # We are not running in backward compatibility mode, so we # must use a mapping. local_id_ref = self.id_mapping_api.get_id_mapping(public_id) if local_id_ref: return ( local_id_ref['domain_id'], driver, local_id_ref['local_id']) else: raise exception.PublicIDNotFound(id=public_id) # If we reach here, this means that the default driver # requires no mapping - but also doesn't understand domains # (e.g. the classic single LDAP driver situation). Hence we pass # back the public_ID unmodified and use the default domain (to # keep backwards compatibility with existing installations). # # It is still possible that the public ID is just invalid in # which case we leave this to the caller to check. return (conf.default_domain_id, driver, public_id) def _assert_user_and_group_in_same_backend( self, user_entity_id, user_driver, group_entity_id, group_driver): """Ensures that user and group IDs are backed by the same backend. Raise a CrossBackendNotAllowed exception if they are not from the same backend, otherwise return None. """ if user_driver is not group_driver: # Determine first if either IDs don't exist by calling # the driver.get methods (which will raise a NotFound # exception). user_driver.get_user(user_entity_id) group_driver.get_group(group_entity_id) # If we get here, then someone is attempting to create a cross # backend membership, which is not allowed. 
raise exception.CrossBackendNotAllowed(group_id=group_entity_id, user_id=user_entity_id) def _mark_domain_id_filter_satisfied(self, hints): if hints: for filter in hints.filters: if (filter['name'] == 'domain_id' and filter['comparator'] == 'equals'): hints.filters.remove(filter) def _ensure_domain_id_in_hints(self, hints, domain_id): if (domain_id is not None and not hints.get_exact_filter_by_name('domain_id')): hints.add_filter('domain_id', domain_id) def _set_list_limit_in_hints(self, hints, driver): """Set list limit in hints from driver If a hints list is provided, the wrapper will insert the relevant limit into the hints so that the underlying driver call can try and honor it. If the driver does truncate the response, it will update the 'truncated' attribute in the 'limit' entry in the hints list, which enables the caller of this function to know if truncation has taken place. If, however, the driver layer is unable to perform truncation, the 'limit' entry is simply left in the hints list for the caller to handle. A _get_list_limit() method is required to be present in the object class hierarchy, which returns the limit for this backend to which we will truncate. If a hints list is not provided in the arguments of the wrapped call then any limits set in the config file are ignored. This allows internal use of such wrapped methods where the entire data set is needed as input for the calculations of some other API (e.g. get role assignments for a given project). This method, specific to identity manager, is used instead of more general response_truncated, because the limit for identity entities can be overriden in domain-specific config files. The driver to use is determined during processing of the passed parameters and response_truncated is designed to set the limit before any processing. 
""" if hints is None: return list_limit = driver._get_list_limit() if list_limit: hints.set_limit(list_limit) # The actual driver calls - these are pre/post processed here as # part of the Manager layer to make sure we: # # - select the right driver for this domain # - clear/set domain_ids for drivers that do not support domains # - create any ID mapping that might be required @notifications.emit_event('authenticate') @domains_configured @exception_translated('assertion') def authenticate(self, context, user_id, password): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(user_id)) ref = driver.authenticate(entity_id, password) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER) @domains_configured @exception_translated('user') def create_user(self, user_ref, initiator=None): user = user_ref.copy() user['name'] = clean.user_name(user['name']) user.setdefault('enabled', True) user['enabled'] = clean.user_enabled(user['enabled']) domain_id = user['domain_id'] self.resource_api.get_domain(domain_id) # For creating a user, the domain is in the object itself domain_id = user_ref['domain_id'] driver = self._select_identity_driver(domain_id) user = self._clear_domain_id_if_domain_unaware(driver, user) # Generate a local ID - in the future this might become a function of # the underlying driver so that it could conform to rules set down by # that particular driver type. 
user['id'] = uuid.uuid4().hex ref = driver.create_user(user['id'], user) notifications.Audit.created(self._USER, user['id'], initiator) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER) @domains_configured @exception_translated('user') @MEMOIZE def get_user(self, user_id): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(user_id)) ref = driver.get_user(entity_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER) def assert_user_enabled(self, user_id, user=None): """Assert the user and the user's domain are enabled. :raise AssertionError if the user or the user's domain is disabled. """ if user is None: user = self.get_user(user_id) self.resource_api.assert_domain_enabled(user['domain_id']) if not user.get('enabled', True): raise AssertionError(_('User is disabled: %s') % user_id) @domains_configured @exception_translated('user') @MEMOIZE def get_user_by_name(self, user_name, domain_id): driver = self._select_identity_driver(domain_id) ref = driver.get_user_by_name(user_name, domain_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER) @domains_configured @exception_translated('user') def list_users(self, domain_scope=None, hints=None): driver = self._select_identity_driver(domain_scope) self._set_list_limit_in_hints(hints, driver) hints = hints or driver_hints.Hints() if driver.is_domain_aware(): # Force the domain_scope into the hint to ensure that we only get # back domains for that scope. self._ensure_domain_id_in_hints(hints, domain_scope) else: # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter. 
self._mark_domain_id_filter_satisfied(hints) ref_list = driver.list_users(hints) return self._set_domain_id_and_mapping( ref_list, domain_scope, driver, mapping.EntityType.USER) def _check_update_of_domain_id(self, new_domain, old_domain): if new_domain != old_domain: versionutils.report_deprecated_feature( LOG, _('update of domain_id is deprecated as of Mitaka ' 'and will be removed in O.') ) @domains_configured @exception_translated('user') def update_user(self, user_id, user_ref, initiator=None): old_user_ref = self.get_user(user_id) user = user_ref.copy() if 'name' in user: user['name'] = clean.user_name(user['name']) if 'enabled' in user: user['enabled'] = clean.user_enabled(user['enabled']) if 'domain_id' in user: self._check_update_of_domain_id(user['domain_id'], old_user_ref['domain_id']) self.resource_api.get_domain(user['domain_id']) if 'id' in user: if user_id != user['id']: raise exception.ValidationError(_('Cannot change user ID')) # Since any ID in the user dict is now irrelevant, remove its so as # the driver layer won't be confused by the fact the this is the # public ID not the local ID user.pop('id') domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(user_id)) user = self._clear_domain_id_if_domain_unaware(driver, user) self.get_user.invalidate(self, old_user_ref['id']) self.get_user_by_name.invalidate(self, old_user_ref['name'], old_user_ref['domain_id']) ref = driver.update_user(entity_id, user) notifications.Audit.updated(self._USER, user_id, initiator) enabled_change = ((user.get('enabled') is False) and user['enabled'] != old_user_ref.get('enabled')) if enabled_change or user.get('password') is not None: self.emit_invalidate_user_token_persistence(user_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER) @domains_configured @exception_translated('user') def delete_user(self, user_id, initiator=None): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(user_id)) # 
Get user details to invalidate the cache. user_old = self.get_user(user_id) driver.delete_user(entity_id) self.assignment_api.delete_user_assignments(user_id) self.get_user.invalidate(self, user_id) self.get_user_by_name.invalidate(self, user_old['name'], user_old['domain_id']) self.credential_api.delete_credentials_for_user(user_id) self.id_mapping_api.delete_id_mapping(user_id) notifications.Audit.deleted(self._USER, user_id, initiator) # Invalidate user role assignments cache region, as it may be caching # role assignments where the actor is the specified user assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() @domains_configured @exception_translated('group') def create_group(self, group_ref, initiator=None): group = group_ref.copy() group.setdefault('description', '') domain_id = group['domain_id'] self.resource_api.get_domain(domain_id) # For creating a group, the domain is in the object itself domain_id = group_ref['domain_id'] driver = self._select_identity_driver(domain_id) group = self._clear_domain_id_if_domain_unaware(driver, group) # Generate a local ID - in the future this might become a function of # the underlying driver so that it could conform to rules set down by # that particular driver type. 
group['id'] = uuid.uuid4().hex ref = driver.create_group(group['id'], group) notifications.Audit.created(self._GROUP, group['id'], initiator) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.GROUP) @domains_configured @exception_translated('group') @MEMOIZE def get_group(self, group_id): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(group_id)) ref = driver.get_group(entity_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.GROUP) @domains_configured @exception_translated('group') def get_group_by_name(self, group_name, domain_id): driver = self._select_identity_driver(domain_id) ref = driver.get_group_by_name(group_name, domain_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.GROUP) @domains_configured @exception_translated('group') def update_group(self, group_id, group, initiator=None): if 'domain_id' in group: old_group_ref = self.get_group(group_id) self._check_update_of_domain_id(group['domain_id'], old_group_ref['domain_id']) self.resource_api.get_domain(group['domain_id']) domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(group_id)) group = self._clear_domain_id_if_domain_unaware(driver, group) ref = driver.update_group(entity_id, group) self.get_group.invalidate(self, group_id) notifications.Audit.updated(self._GROUP, group_id, initiator) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.GROUP) @domains_configured @exception_translated('group') def delete_group(self, group_id, initiator=None): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(group_id)) user_ids = (u['id'] for u in self.list_users_in_group(group_id)) driver.delete_group(entity_id) self.get_group.invalidate(self, group_id) self.id_mapping_api.delete_id_mapping(group_id) self.assignment_api.delete_group_assignments(group_id) notifications.Audit.deleted(self._GROUP, group_id, initiator) 
for uid in user_ids: self.emit_invalidate_user_token_persistence(uid) # Invalidate user role assignments cache region, as it may be caching # role assignments expanded from the specified group to its users assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() @domains_configured @exception_translated('group') def add_user_to_group(self, user_id, group_id, initiator=None): @exception_translated('user') def get_entity_info_for_user(public_id): return self._get_domain_driver_and_entity_id(public_id) _domain_id, group_driver, group_entity_id = ( self._get_domain_driver_and_entity_id(group_id)) # Get the same info for the user_id, taking care to map any # exceptions correctly _domain_id, user_driver, user_entity_id = ( get_entity_info_for_user(user_id)) self._assert_user_and_group_in_same_backend( user_entity_id, user_driver, group_entity_id, group_driver) group_driver.add_user_to_group(user_entity_id, group_entity_id) # Invalidate user role assignments cache region, as it may now need to # include role assignments from the specified group to its users assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() notifications.Audit.added_to(self._GROUP, group_id, self._USER, user_id, initiator) @domains_configured @exception_translated('group') def remove_user_from_group(self, user_id, group_id, initiator=None): @exception_translated('user') def get_entity_info_for_user(public_id): return self._get_domain_driver_and_entity_id(public_id) _domain_id, group_driver, group_entity_id = ( self._get_domain_driver_and_entity_id(group_id)) # Get the same info for the user_id, taking care to map any # exceptions correctly _domain_id, user_driver, user_entity_id = ( get_entity_info_for_user(user_id)) self._assert_user_and_group_in_same_backend( user_entity_id, user_driver, group_entity_id, group_driver) group_driver.remove_user_from_group(user_entity_id, group_entity_id) self.emit_invalidate_user_token_persistence(user_id) # Invalidate user role assignments cache region, as it may be caching 
# role assignments expanded from this group to this user assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() notifications.Audit.removed_from(self._GROUP, group_id, self._USER, user_id, initiator) def emit_invalidate_user_token_persistence(self, user_id): """Emit a notification to the callback system to revoke user tokens. This method and associated callback listener removes the need for making a direct call to another manager to delete and revoke tokens. :param user_id: user identifier :type user_id: string """ notifications.Audit.internal( notifications.INVALIDATE_USER_TOKEN_PERSISTENCE, user_id ) def emit_invalidate_grant_token_persistence(self, user_project): """Emit a notification to the callback system to revoke grant tokens. This method and associated callback listener removes the need for making a direct call to another manager to delete and revoke tokens. :param user_project: {'user_id': user_id, 'project_id': project_id} :type user_project: dict """ notifications.Audit.internal( notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE, user_project ) @domains_configured @exception_translated('user') def list_groups_for_user(self, user_id, hints=None): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(user_id)) self._set_list_limit_in_hints(hints, driver) hints = hints or driver_hints.Hints() if not driver.is_domain_aware(): # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter self._mark_domain_id_filter_satisfied(hints) ref_list = driver.list_groups_for_user(entity_id, hints) return self._set_domain_id_and_mapping( ref_list, domain_id, driver, mapping.EntityType.GROUP) @domains_configured @exception_translated('group') def list_groups(self, domain_scope=None, hints=None): driver = self._select_identity_driver(domain_scope) self._set_list_limit_in_hints(hints, driver) hints = hints or driver_hints.Hints() if driver.is_domain_aware(): # Force the domain_scope into the hint to 
ensure that we only get # back domains for that scope. self._ensure_domain_id_in_hints(hints, domain_scope) else: # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter. self._mark_domain_id_filter_satisfied(hints) ref_list = driver.list_groups(hints) return self._set_domain_id_and_mapping( ref_list, domain_scope, driver, mapping.EntityType.GROUP) @domains_configured @exception_translated('group') def list_users_in_group(self, group_id, hints=None): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(group_id)) self._set_list_limit_in_hints(hints, driver) hints = hints or driver_hints.Hints() if not driver.is_domain_aware(): # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter self._mark_domain_id_filter_satisfied(hints) ref_list = driver.list_users_in_group(entity_id, hints) return self._set_domain_id_and_mapping( ref_list, domain_id, driver, mapping.EntityType.USER) @domains_configured @exception_translated('group') def check_user_in_group(self, user_id, group_id): @exception_translated('user') def get_entity_info_for_user(public_id): return self._get_domain_driver_and_entity_id(public_id) _domain_id, group_driver, group_entity_id = ( self._get_domain_driver_and_entity_id(group_id)) # Get the same info for the user_id, taking care to map any # exceptions correctly _domain_id, user_driver, user_entity_id = ( get_entity_info_for_user(user_id)) self._assert_user_and_group_in_same_backend( user_entity_id, user_driver, group_entity_id, group_driver) return group_driver.check_user_in_group(user_entity_id, group_entity_id) @domains_configured def change_password(self, context, user_id, original_password, new_password): # authenticate() will raise an AssertionError if authentication fails self.authenticate(context, user_id, original_password) update_dict = {'password': new_password} self.update_user(user_id, update_dict) @MEMOIZE def 
shadow_federated_user(self, idp_id, protocol_id, unique_id, display_name): """Shadows a federated user by mapping to a user. :param idp_id: identity provider id :param protocol_id: protocol id :param unique_id: unique id for the user within the IdP :param display_name: user's display name :returns: dictionary of the mapped User entity """ user_dict = {} try: user_dict = self.shadow_users_api.get_federated_user( idp_id, protocol_id, unique_id) self.shadow_users_api.update_federated_user_display_name( idp_id, protocol_id, unique_id, display_name) except exception.UserNotFound: federated_dict = { 'idp_id': idp_id, 'protocol_id': protocol_id, 'unique_id': unique_id, 'display_name': display_name } user_dict = self.shadow_users_api.create_federated_user( federated_dict) return user_dict @six.add_metaclass(abc.ABCMeta) class IdentityDriverV8(object): """Interface description for an Identity driver.""" def _get_conf(self): try: return self.conf or CONF except AttributeError: return CONF def _get_list_limit(self): conf = self._get_conf() # use list_limit from domain-specific config. If list_limit in # domain-specific config is not set, look it up in the default config return (conf.identity.list_limit or conf.list_limit or CONF.identity.list_limit or CONF.list_limit) def is_domain_aware(self): """Indicates if Driver supports domains.""" return True def default_assignment_driver(self): # TODO(morganfainberg): To be removed when assignment driver based # upon [identity]/driver option is removed in the "O" release. return 'sql' @property def is_sql(self): """Indicates if this Driver uses SQL.""" return False @property def multiple_domains_supported(self): return (self.is_domain_aware() or CONF.identity.domain_specific_drivers_enabled) def generates_uuids(self): """Indicates if Driver generates UUIDs as the local entity ID.""" return True @abc.abstractmethod def authenticate(self, user_id, password): """Authenticate a given user and password. 
:returns: user_ref :raises AssertionError: If user or password is invalid. """ raise exception.NotImplemented() # pragma: no cover # user crud @abc.abstractmethod def create_user(self, user_id, user): """Creates a new user. :raises keystone.exception.Conflict: If a duplicate user exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_users(self, hints): """List users in the system. :param hints: filter hints which the driver should implement if at all possible. :returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_users_in_group(self, group_id, hints): """List users in a group. :param group_id: the group in question :param hints: filter hints which the driver should implement if at all possible. :returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_user(self, user_id): """Get a user by ID. :returns: user_ref :raises keystone.exception.UserNotFound: If the user doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_user(self, user_id, user): """Updates an existing user. :raises keystone.exception.UserNotFound: If the user doesn't exist. :raises keystone.exception.Conflict: If a duplicate user exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def add_user_to_group(self, user_id, group_id): """Adds a user to a group. :raises keystone.exception.UserNotFound: If the user doesn't exist. :raises keystone.exception.GroupNotFound: If the group doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def check_user_in_group(self, user_id, group_id): """Checks if a user is a member of a group. :raises keystone.exception.UserNotFound: If the user doesn't exist. :raises keystone.exception.GroupNotFound: If the group doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def remove_user_from_group(self, user_id, group_id): """Removes a user from a group. :raises keystone.exception.NotFound: If the entity not found. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_user(self, user_id): """Deletes an existing user. :raises keystone.exception.UserNotFound: If the user doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_user_by_name(self, user_name, domain_id): """Get a user by name. :returns: user_ref :raises keystone.exception.UserNotFound: If the user doesn't exist. """ raise exception.NotImplemented() # pragma: no cover # group crud @abc.abstractmethod def create_group(self, group_id, group): """Creates a new group. :raises keystone.exception.Conflict: If a duplicate group exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_groups(self, hints): """List groups in the system. :param hints: filter hints which the driver should implement if at all possible. :returns: a list of group_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_groups_for_user(self, user_id, hints): """List groups a user is in :param user_id: the user in question :param hints: filter hints which the driver should implement if at all possible. :returns: a list of group_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_group(self, group_id): """Get a group by ID. :returns: group_ref :raises keystone.exception.GroupNotFound: If the group doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_group_by_name(self, group_name, domain_id): """Get a group by name. :returns: group_ref :raises keystone.exception.GroupNotFound: If the group doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_group(self, group_id, group): """Updates an existing group. :raises keystone.exception.GroupNotFound: If the group doesn't exist. :raises keystone.exception.Conflict: If a duplicate group exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_group(self, group_id): """Deletes an existing group. :raises keystone.exception.GroupNotFound: If the group doesn't exist. """ raise exception.NotImplemented() # pragma: no cover # end of identity Driver = manager.create_legacy_driver(IdentityDriverV8) @dependency.provider('id_mapping_api') class MappingManager(manager.Manager): """Default pivot point for the ID Mapping backend.""" driver_namespace = 'keystone.identity.id_mapping' def __init__(self): super(MappingManager, self).__init__(CONF.identity_mapping.driver) @six.add_metaclass(abc.ABCMeta) class MappingDriverV8(object): """Interface description for an ID Mapping driver.""" @abc.abstractmethod def get_public_id(self, local_entity): """Returns the public ID for the given local entity. :param dict local_entity: Containing the entity domain, local ID and type ('user' or 'group'). :returns: public ID, or None if no mapping is found. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_id_mapping(self, public_id): """Returns the local mapping. :param public_id: The public ID for the mapping required. :returns dict: Containing the entity domain, local ID and type. If no mapping is found, it returns None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_id_mapping(self, local_entity, public_id=None): """Create and store a mapping to a public_id. :param dict local_entity: Containing the entity domain, local ID and type ('user' or 'group'). :param public_id: If specified, this will be the public ID. If this is not specified, a public ID will be generated. 
:returns: public ID """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_id_mapping(self, public_id): """Deletes an entry for the given public_id. :param public_id: The public ID for the mapping to be deleted. The method is silent if no mapping is found. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def purge_mappings(self, purge_filter): """Purge selected identity mappings. :param dict purge_filter: Containing the attributes of the filter that defines which entries to purge. An empty filter means purge all mappings. """ raise exception.NotImplemented() # pragma: no cover MappingDriver = manager.create_legacy_driver(MappingDriverV8) @dependency.provider('shadow_users_api') class ShadowUsersManager(manager.Manager): """Default pivot point for the Shadow Users backend.""" driver_namespace = 'keystone.identity.shadow_users' def __init__(self): super(ShadowUsersManager, self).__init__(CONF.shadow_users.driver) @six.add_metaclass(abc.ABCMeta) class ShadowUsersDriverV9(object): """Interface description for an Shadow Users driver.""" @abc.abstractmethod def create_federated_user(self, federated_dict): """Create a new user with the federated identity :param dict federated_dict: Reference to the federated user :param user_id: user ID for linking to the federated identity :returns dict: Containing the user reference """ raise exception.NotImplemented() @abc.abstractmethod def get_federated_user(self, idp_id, protocol_id, unique_id): """Returns the found user for the federated identity :param idp_id: The identity provider ID :param protocol_id: The federation protocol ID :param unique_id: The unique ID for the user :returns dict: Containing the user reference """ raise exception.NotImplemented() @abc.abstractmethod def update_federated_user_display_name(self, idp_id, protocol_id, unique_id, display_name): """Updates federated user's display name if changed :param idp_id: The identity provider ID :param 
protocol_id: The federation protocol ID :param unique_id: The unique ID for the user :param display_name: The user's display name """ raise exception.NotImplemented() keystone-9.0.0/keystone/identity/generator.py0000664000567000056710000000315012701407102022545 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ID Generator provider interface.""" import abc from oslo_config import cfg import six from keystone.common import dependency from keystone.common import manager from keystone import exception CONF = cfg.CONF @dependency.provider('id_generator_api') class Manager(manager.Manager): """Default pivot point for the identifier generator backend.""" driver_namespace = 'keystone.identity.id_generator' def __init__(self): super(Manager, self).__init__(CONF.identity_mapping.generator) @six.add_metaclass(abc.ABCMeta) class IDGenerator(object): """Interface description for an ID Generator provider.""" @abc.abstractmethod def generate_public_ID(self, mapping): """Return a Public ID for the given mapping dict. :param dict mapping: The items to be hashed. The ID must be reproducible and no more than 64 chars in length. The ID generated should be independent of the order of the items in the mapping dict. 
""" raise exception.NotImplemented() # pragma: no cover keystone-9.0.0/keystone/identity/controllers.py0000664000567000056710000003504312701407102023133 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Workflow Logic the Identity service.""" from oslo_config import cfg from oslo_log import log from keystone.common import controller from keystone.common import dependency from keystone.common import validation from keystone import exception from keystone.i18n import _, _LW from keystone.identity import schema from keystone import notifications CONF = cfg.CONF LOG = log.getLogger(__name__) @dependency.requires('assignment_api', 'identity_api', 'resource_api') class User(controller.V2Controller): @controller.v2_deprecated def get_user(self, context, user_id): self.assert_admin(context) ref = self.identity_api.get_user(user_id) return {'user': self.v3_to_v2_user(ref)} @controller.v2_deprecated def get_users(self, context): # NOTE(termie): i can't imagine that this really wants all the data # about every single user in the system... 
if 'name' in context['query_string']: return self.get_user_by_name( context, context['query_string'].get('name')) self.assert_admin(context) user_list = self.identity_api.list_users( CONF.identity.default_domain_id) return {'users': self.v3_to_v2_user(user_list)} @controller.v2_deprecated def get_user_by_name(self, context, user_name): self.assert_admin(context) ref = self.identity_api.get_user_by_name( user_name, CONF.identity.default_domain_id) return {'user': self.v3_to_v2_user(ref)} # CRUD extension @controller.v2_deprecated def create_user(self, context, user): user = self._normalize_OSKSADM_password_on_request(user) user = self.normalize_username_in_request(user) user = self._normalize_dict(user) self.assert_admin(context) if 'name' not in user or not user['name']: msg = _('Name field is required and cannot be empty') raise exception.ValidationError(message=msg) if 'enabled' in user and not isinstance(user['enabled'], bool): msg = _('Enabled field must be a boolean') raise exception.ValidationError(message=msg) default_project_id = user.pop('tenantId', None) if default_project_id is not None: # Check to see if the project is valid before moving on. 
self.resource_api.get_project(default_project_id) user['default_project_id'] = default_project_id self.resource_api.ensure_default_domain_exists() # The manager layer will generate the unique ID for users user_ref = self._normalize_domain_id(context, user.copy()) initiator = notifications._get_request_audit_info(context) new_user_ref = self.v3_to_v2_user( self.identity_api.create_user(user_ref, initiator)) if default_project_id is not None: self.assignment_api.add_user_to_project(default_project_id, new_user_ref['id']) return {'user': new_user_ref} @controller.v2_deprecated def update_user(self, context, user_id, user): # NOTE(termie): this is really more of a patch than a put user = self.normalize_username_in_request(user) self.assert_admin(context) if 'enabled' in user and not isinstance(user['enabled'], bool): msg = _('Enabled field should be a boolean') raise exception.ValidationError(message=msg) default_project_id = user.pop('tenantId', None) if default_project_id is not None: user['default_project_id'] = default_project_id old_user_ref = self.v3_to_v2_user( self.identity_api.get_user(user_id)) # Check whether a tenant is being added or changed for the user. # Catch the case where the tenant is being changed for a user and also # where a user previously had no tenant but a tenant is now being # added for the user. if (('tenantId' in old_user_ref and old_user_ref['tenantId'] != default_project_id and default_project_id is not None) or ('tenantId' not in old_user_ref and default_project_id is not None)): # Make sure the new project actually exists before we perform the # user update. self.resource_api.get_project(default_project_id) initiator = notifications._get_request_audit_info(context) user_ref = self.v3_to_v2_user( self.identity_api.update_user(user_id, user, initiator)) # If 'tenantId' is in either ref, we might need to add or remove the # user from a project. 
if 'tenantId' in user_ref or 'tenantId' in old_user_ref: if user_ref['tenantId'] != old_user_ref.get('tenantId'): if old_user_ref.get('tenantId'): try: member_role_id = CONF.member_role_id self.assignment_api.remove_role_from_user_and_project( user_id, old_user_ref['tenantId'], member_role_id) except exception.NotFound: # NOTE(morganfainberg): This is not a critical error it # just means that the user cannot be removed from the # old tenant. This could occur if roles aren't found # or if the project is invalid or if there are no roles # for the user on that project. msg = _LW('Unable to remove user %(user)s from ' '%(tenant)s.') LOG.warning(msg, {'user': user_id, 'tenant': old_user_ref['tenantId']}) if user_ref['tenantId']: try: self.assignment_api.add_user_to_project( user_ref['tenantId'], user_id) except exception.Conflict: # nosec # We are already a member of that tenant pass except exception.NotFound: # NOTE(morganfainberg): Log this and move on. This is # not the end of the world if we can't add the user to # the appropriate tenant. Most of the time this means # that the project is invalid or roles are some how # incorrect. This shouldn't prevent the return of the # new ref. msg = _LW('Unable to add user %(user)s to %(tenant)s.') LOG.warning(msg, {'user': user_id, 'tenant': user_ref['tenantId']}) return {'user': user_ref} @controller.v2_deprecated def delete_user(self, context, user_id): self.assert_admin(context) initiator = notifications._get_request_audit_info(context) self.identity_api.delete_user(user_id, initiator) @controller.v2_deprecated def set_user_enabled(self, context, user_id, user): return self.update_user(context, user_id, user) @controller.v2_deprecated def set_user_password(self, context, user_id, user): user = self._normalize_OSKSADM_password_on_request(user) return self.update_user(context, user_id, user) @staticmethod def _normalize_OSKSADM_password_on_request(ref): """Sets the password from the OS-KSADM Admin Extension. 
The OS-KSADM Admin Extension documentation says that `OS-KSADM:password` can be used in place of `password`. """ if 'OS-KSADM:password' in ref: ref['password'] = ref.pop('OS-KSADM:password') return ref @dependency.requires('identity_api') class UserV3(controller.V3Controller): collection_name = 'users' member_name = 'user' def __init__(self): super(UserV3, self).__init__() self.get_member_from_driver = self.identity_api.get_user def _check_user_and_group_protection(self, context, prep_info, user_id, group_id): ref = {} ref['user'] = self.identity_api.get_user(user_id) ref['group'] = self.identity_api.get_group(group_id) self.check_protection(context, prep_info, ref) @controller.protected() @validation.validated(schema.user_create, 'user') def create_user(self, context, user): # The manager layer will generate the unique ID for users ref = self._normalize_dict(user) ref = self._normalize_domain_id(context, ref) initiator = notifications._get_request_audit_info(context) ref = self.identity_api.create_user(ref, initiator) return UserV3.wrap_member(context, ref) @controller.filterprotected('domain_id', 'enabled', 'name') def list_users(self, context, filters): hints = UserV3.build_driver_hints(context, filters) refs = self.identity_api.list_users( domain_scope=self._get_domain_id_for_list_request(context), hints=hints) return UserV3.wrap_collection(context, refs, hints=hints) @controller.filterprotected('domain_id', 'enabled', 'name') def list_users_in_group(self, context, filters, group_id): hints = UserV3.build_driver_hints(context, filters) refs = self.identity_api.list_users_in_group(group_id, hints=hints) return UserV3.wrap_collection(context, refs, hints=hints) @controller.protected() def get_user(self, context, user_id): ref = self.identity_api.get_user(user_id) return UserV3.wrap_member(context, ref) def _update_user(self, context, user_id, user): self._require_matching_id(user_id, user) self._require_matching_domain_id( user_id, user, 
self.identity_api.get_user) initiator = notifications._get_request_audit_info(context) ref = self.identity_api.update_user(user_id, user, initiator) return UserV3.wrap_member(context, ref) @controller.protected() @validation.validated(schema.user_update, 'user') def update_user(self, context, user_id, user): return self._update_user(context, user_id, user) @controller.protected(callback=_check_user_and_group_protection) def add_user_to_group(self, context, user_id, group_id): initiator = notifications._get_request_audit_info(context) self.identity_api.add_user_to_group(user_id, group_id, initiator) @controller.protected(callback=_check_user_and_group_protection) def check_user_in_group(self, context, user_id, group_id): return self.identity_api.check_user_in_group(user_id, group_id) @controller.protected(callback=_check_user_and_group_protection) def remove_user_from_group(self, context, user_id, group_id): initiator = notifications._get_request_audit_info(context) self.identity_api.remove_user_from_group(user_id, group_id, initiator) @controller.protected() def delete_user(self, context, user_id): initiator = notifications._get_request_audit_info(context) return self.identity_api.delete_user(user_id, initiator) @controller.protected() def change_password(self, context, user_id, user): original_password = user.get('original_password') if original_password is None: raise exception.ValidationError(target='user', attribute='original_password') password = user.get('password') if password is None: raise exception.ValidationError(target='user', attribute='password') try: self.identity_api.change_password( context, user_id, original_password, password) except AssertionError: raise exception.Unauthorized() @dependency.requires('identity_api') class GroupV3(controller.V3Controller): collection_name = 'groups' member_name = 'group' def __init__(self): super(GroupV3, self).__init__() self.get_member_from_driver = self.identity_api.get_group @controller.protected() 
@validation.validated(schema.group_create, 'group') def create_group(self, context, group): # The manager layer will generate the unique ID for groups ref = self._normalize_dict(group) ref = self._normalize_domain_id(context, ref) initiator = notifications._get_request_audit_info(context) ref = self.identity_api.create_group(ref, initiator) return GroupV3.wrap_member(context, ref) @controller.filterprotected('domain_id', 'name') def list_groups(self, context, filters): hints = GroupV3.build_driver_hints(context, filters) refs = self.identity_api.list_groups( domain_scope=self._get_domain_id_for_list_request(context), hints=hints) return GroupV3.wrap_collection(context, refs, hints=hints) @controller.filterprotected('name') def list_groups_for_user(self, context, filters, user_id): hints = GroupV3.build_driver_hints(context, filters) refs = self.identity_api.list_groups_for_user(user_id, hints=hints) return GroupV3.wrap_collection(context, refs, hints=hints) @controller.protected() def get_group(self, context, group_id): ref = self.identity_api.get_group(group_id) return GroupV3.wrap_member(context, ref) @controller.protected() @validation.validated(schema.group_update, 'group') def update_group(self, context, group_id, group): self._require_matching_id(group_id, group) self._require_matching_domain_id( group_id, group, self.identity_api.get_group) initiator = notifications._get_request_audit_info(context) ref = self.identity_api.update_group(group_id, group, initiator) return GroupV3.wrap_member(context, ref) @controller.protected() def delete_group(self, context, group_id): initiator = notifications._get_request_audit_info(context) self.identity_api.delete_group(group_id, initiator) keystone-9.0.0/keystone/identity/mapping_backends/0000775000567000056710000000000012701407246023504 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/identity/mapping_backends/__init__.py0000664000567000056710000000000012701407102025572 0ustar 
jenkinsjenkins00000000000000keystone-9.0.0/keystone/identity/mapping_backends/sql.py0000664000567000056710000001031112701407102024640 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import dependency from keystone.common import sql from keystone import identity from keystone.identity.mapping_backends import mapping as identity_mapping class IDMapping(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'id_mapping' public_id = sql.Column(sql.String(64), primary_key=True) domain_id = sql.Column(sql.String(64), nullable=False) local_id = sql.Column(sql.String(64), nullable=False) # NOTE(henry-nash): Postgres requires a name to be defined for an Enum entity_type = sql.Column( sql.Enum(identity_mapping.EntityType.USER, identity_mapping.EntityType.GROUP, name='entity_type'), nullable=False) # Unique constraint to ensure you can't store more than one mapping to the # same underlying values __table_args__ = ( sql.UniqueConstraint('domain_id', 'local_id', 'entity_type'),) @dependency.requires('id_generator_api') class Mapping(identity.MappingDriverV8): def get_public_id(self, local_entity): # NOTE(henry-nash): Since the Public ID is regeneratable, rather # than search for the entry using the local entity values, we # could create the hash and do a PK lookup. However this would only # work if we hashed all the entries, even those that already generate # UUIDs, like SQL. 
Further, this would only work if the generation # algorithm was immutable (e.g. it had always been sha256). with sql.session_for_read() as session: query = session.query(IDMapping.public_id) query = query.filter_by(domain_id=local_entity['domain_id']) query = query.filter_by(local_id=local_entity['local_id']) query = query.filter_by(entity_type=local_entity['entity_type']) try: public_ref = query.one() public_id = public_ref.public_id return public_id except sql.NotFound: return None def get_id_mapping(self, public_id): with sql.session_for_read() as session: mapping_ref = session.query(IDMapping).get(public_id) if mapping_ref: return mapping_ref.to_dict() def create_id_mapping(self, local_entity, public_id=None): entity = local_entity.copy() with sql.session_for_write() as session: if public_id is None: public_id = self.id_generator_api.generate_public_ID(entity) entity['public_id'] = public_id mapping_ref = IDMapping.from_dict(entity) session.add(mapping_ref) return public_id def delete_id_mapping(self, public_id): with sql.session_for_write() as session: try: session.query(IDMapping).filter( IDMapping.public_id == public_id).delete() except sql.NotFound: # nosec # NOTE(morganfainberg): There is nothing to delete and nothing # to do. pass def purge_mappings(self, purge_filter): with sql.session_for_write() as session: query = session.query(IDMapping) if 'domain_id' in purge_filter: query = query.filter_by(domain_id=purge_filter['domain_id']) if 'public_id' in purge_filter: query = query.filter_by(public_id=purge_filter['public_id']) if 'local_id' in purge_filter: query = query.filter_by(local_id=purge_filter['local_id']) if 'entity_type' in purge_filter: query = query.filter_by( entity_type=purge_filter['entity_type']) query.delete() keystone-9.0.0/keystone/identity/mapping_backends/mapping.py0000664000567000056710000000120012701407102025471 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class EntityType(object): USER = 'user' GROUP = 'group' keystone-9.0.0/keystone/identity/id_generators/0000775000567000056710000000000012701407246023044 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/identity/id_generators/__init__.py0000664000567000056710000000000012701407102025132 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/identity/id_generators/sha256.py0000664000567000056710000000157012701407102024420 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import hashlib import six from keystone.identity import generator class Generator(generator.IDGenerator): def generate_public_ID(self, mapping): m = hashlib.sha256() for key in sorted(six.iterkeys(mapping)): m.update(mapping[key].encode('utf-8')) return m.hexdigest() keystone-9.0.0/keystone/identity/routers.py0000664000567000056710000000606312701407105022273 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """WSGI Routers for the Identity service.""" from keystone.common import json_home from keystone.common import router from keystone.common import wsgi from keystone.identity import controllers class Admin(wsgi.ComposableRouter): def add_routes(self, mapper): # User Operations user_controller = controllers.User() mapper.connect('/users/{user_id}', controller=user_controller, action='get_user', conditions=dict(method=['GET'])) class Routers(wsgi.RoutersBase): def append_v3_routers(self, mapper, routers): user_controller = controllers.UserV3() routers.append( router.Router(user_controller, 'users', 'user', resource_descriptions=self.v3_resources)) self._add_resource( mapper, user_controller, path='/users/{user_id}/password', post_action='change_password', rel=json_home.build_v3_resource_relation('user_change_password'), path_vars={ 'user_id': json_home.Parameters.USER_ID, }) self._add_resource( mapper, user_controller, path='/groups/{group_id}/users', get_action='list_users_in_group', 
rel=json_home.build_v3_resource_relation('group_users'), path_vars={ 'group_id': json_home.Parameters.GROUP_ID, }) self._add_resource( mapper, user_controller, path='/groups/{group_id}/users/{user_id}', put_action='add_user_to_group', get_head_action='check_user_in_group', delete_action='remove_user_from_group', rel=json_home.build_v3_resource_relation('group_user'), path_vars={ 'group_id': json_home.Parameters.GROUP_ID, 'user_id': json_home.Parameters.USER_ID, }) group_controller = controllers.GroupV3() routers.append( router.Router(group_controller, 'groups', 'group', resource_descriptions=self.v3_resources)) self._add_resource( mapper, group_controller, path='/users/{user_id}/groups', get_action='list_groups_for_user', rel=json_home.build_v3_resource_relation('user_groups'), path_vars={ 'user_id': json_home.Parameters.USER_ID, }) keystone-9.0.0/keystone/tests/0000775000567000056710000000000012701407246017530 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/__init__.py0000664000567000056710000000000012701407102021616 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/hacking/0000775000567000056710000000000012701407246021134 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/hacking/__init__.py0000664000567000056710000000000012701407102023222 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/hacking/checks.py0000664000567000056710000004116112701407102022740 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Keystone's pep8 extensions. In order to make the review process faster and easier for core devs we are adding some Keystone specific pep8 checks. This will catch common errors so that core devs don't have to. There are two types of pep8 extensions. One is a function that takes either a physical or logical line. The physical or logical line is the first param in the function definition and can be followed by other parameters supported by pep8. The second type is a class that parses AST trees. For more info please see pep8.py. """ import ast import re import six class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. Subclasses should implement visit_* methods like any other AST visitor implementation. When they detect an error for a particular node the method should call ``self.add_error(offending_node)``. Details about where in the code the error occurred will be pulled from the node object. Subclasses should also provide a class variable named CHECK_DESC to be used for the human readable error message. """ def __init__(self, tree, filename): """This object is created automatically by pep8. :param tree: an AST tree :param filename: name of the file being analyzed (ignored by our checks) """ self._tree = tree self._errors = [] def run(self): """Called automatically by pep8.""" self.visit(self._tree) return self._errors def add_error(self, node, message=None): """Add an error caused by a node to the list of errors for pep8.""" message = message or self.CHECK_DESC error = (node.lineno, node.col_offset, message, self.__class__) self._errors.append(error) class CheckForMutableDefaultArgs(BaseASTChecker): """Checks for the use of mutable objects as function/method defaults. We are only checking for list and dict literals at this time. This means that a developer could specify an instance of their own and cause a bug. 
The fix for this is probably more work than it's worth because it will get caught during code review. """ CHECK_DESC = 'K001 Using mutable as a function/method default' MUTABLES = ( ast.List, ast.ListComp, ast.Dict, ast.DictComp, ast.Set, ast.SetComp, ast.Call) def visit_FunctionDef(self, node): for arg in node.args.defaults: if isinstance(arg, self.MUTABLES): self.add_error(arg) super(CheckForMutableDefaultArgs, self).generic_visit(node) def block_comments_begin_with_a_space(physical_line, line_number): """There should be a space after the # of block comments. There is already a check in pep8 that enforces this rule for inline comments. Okay: # this is a comment Okay: #!/usr/bin/python Okay: # this is a comment K002: #this is a comment """ MESSAGE = "K002 block comments should start with '# '" # shebangs are OK if line_number == 1 and physical_line.startswith('#!'): return text = physical_line.strip() if text.startswith('#'): # look for block comments if len(text) > 1 and not text[1].isspace(): return physical_line.index('#'), MESSAGE class CheckForAssertingNoneEquality(BaseASTChecker): """Ensures that code does not use a None with assert(Not*)Equal.""" CHECK_DESC_IS = ('K003 Use self.assertIsNone(...) when comparing ' 'against None') CHECK_DESC_ISNOT = ('K004 Use assertIsNotNone(...) when comparing ' ' against None') def visit_Call(self, node): # NOTE(dstanek): I wrote this in a verbose way to make it easier to # read for those that have little experience with Python's AST. 
def _is_None(node): if six.PY3: return (isinstance(node, ast.NameConstant) and node.value is None) else: return isinstance(node, ast.Name) and node.id == 'None' if isinstance(node.func, ast.Attribute): if node.func.attr == 'assertEqual': for arg in node.args: if _is_None(arg): self.add_error(node, message=self.CHECK_DESC_IS) elif node.func.attr == 'assertNotEqual': for arg in node.args: if _is_None(arg): self.add_error(node, message=self.CHECK_DESC_ISNOT) super(CheckForAssertingNoneEquality, self).generic_visit(node) class CheckForLoggingIssues(BaseASTChecker): DEBUG_CHECK_DESC = 'K005 Using translated string in debug logging' NONDEBUG_CHECK_DESC = 'K006 Not using translating helper for logging' EXCESS_HELPER_CHECK_DESC = 'K007 Using hints when _ is necessary' USING_DEPRECATED_WARN = 'K009 Using the deprecated Logger.warn' LOG_MODULES = ('logging', 'oslo_log.log') I18N_MODULES = ( 'keystone.i18n._', 'keystone.i18n._LI', 'keystone.i18n._LW', 'keystone.i18n._LE', 'keystone.i18n._LC', ) TRANS_HELPER_MAP = { 'debug': None, 'info': '_LI', 'warning': '_LW', 'error': '_LE', 'exception': '_LE', 'critical': '_LC', } def __init__(self, tree, filename): super(CheckForLoggingIssues, self).__init__(tree, filename) self.logger_names = [] self.logger_module_names = [] self.i18n_names = {} # NOTE(dstanek): this kinda accounts for scopes when talking # about only leaf node in the graph self.assignments = {} def generic_visit(self, node): """Called if no explicit visitor function exists for a node.""" for field, value in ast.iter_fields(node): if isinstance(value, list): for item in value: if isinstance(item, ast.AST): item._parent = node self.visit(item) elif isinstance(value, ast.AST): value._parent = node self.visit(value) def _filter_imports(self, module_name, alias): """Keeps lists of logging and i18n imports.""" if module_name in self.LOG_MODULES: self.logger_module_names.append(alias.asname or alias.name) elif module_name in self.I18N_MODULES: self.i18n_names[alias.asname or 
alias.name] = alias.name def visit_Import(self, node): for alias in node.names: self._filter_imports(alias.name, alias) return super(CheckForLoggingIssues, self).generic_visit(node) def visit_ImportFrom(self, node): for alias in node.names: full_name = '%s.%s' % (node.module, alias.name) self._filter_imports(full_name, alias) return super(CheckForLoggingIssues, self).generic_visit(node) def _find_name(self, node): """Return the fully qualified name or a Name or Attribute.""" if isinstance(node, ast.Name): return node.id elif (isinstance(node, ast.Attribute) and isinstance(node.value, (ast.Name, ast.Attribute))): method_name = node.attr obj_name = self._find_name(node.value) if obj_name is None: return None return obj_name + '.' + method_name elif isinstance(node, six.string_types): return node else: # could be Subscript, Call or many more return None def visit_Assign(self, node): """Look for 'LOG = logging.getLogger' This handles the simple case: name = [logging_module].getLogger(...) - or - name = [i18n_name](...) And some much more comple ones: name = [i18n_name](...) % X - or - self.name = [i18n_name](...) % X """ attr_node_types = (ast.Name, ast.Attribute) if (len(node.targets) != 1 or not isinstance(node.targets[0], attr_node_types)): # say no to: "x, y = ..." return super(CheckForLoggingIssues, self).generic_visit(node) target_name = self._find_name(node.targets[0]) if (isinstance(node.value, ast.BinOp) and isinstance(node.value.op, ast.Mod)): if (isinstance(node.value.left, ast.Call) and isinstance(node.value.left.func, ast.Name) and node.value.left.func.id in self.i18n_names): # NOTE(dstanek): this is done to match cases like: # `msg = _('something %s') % x` node = ast.Assign(value=node.value.left) if not isinstance(node.value, ast.Call): # node.value must be a call to getLogger self.assignments.pop(target_name, None) return super(CheckForLoggingIssues, self).generic_visit(node) # is this a call to an i18n function? 
if (isinstance(node.value.func, ast.Name) and node.value.func.id in self.i18n_names): self.assignments[target_name] = node.value.func.id return super(CheckForLoggingIssues, self).generic_visit(node) if (not isinstance(node.value.func, ast.Attribute) or not isinstance(node.value.func.value, attr_node_types)): # function must be an attribute on an object like # logging.getLogger return super(CheckForLoggingIssues, self).generic_visit(node) object_name = self._find_name(node.value.func.value) func_name = node.value.func.attr if (object_name in self.logger_module_names and func_name == 'getLogger'): self.logger_names.append(target_name) return super(CheckForLoggingIssues, self).generic_visit(node) def visit_Call(self, node): """Look for the 'LOG.*' calls.""" # obj.method if isinstance(node.func, ast.Attribute): obj_name = self._find_name(node.func.value) if isinstance(node.func.value, ast.Name): method_name = node.func.attr elif isinstance(node.func.value, ast.Attribute): obj_name = self._find_name(node.func.value) method_name = node.func.attr else: # could be Subscript, Call or many more return super(CheckForLoggingIssues, self).generic_visit(node) # if dealing with a logger the method can't be "warn" if obj_name in self.logger_names and method_name == 'warn': msg = node.args[0] # first arg to a logging method is the msg self.add_error(msg, message=self.USING_DEPRECATED_WARN) # must be a logger instance and one of the support logging methods if (obj_name not in self.logger_names or method_name not in self.TRANS_HELPER_MAP): return super(CheckForLoggingIssues, self).generic_visit(node) # the call must have arguments if not node.args: return super(CheckForLoggingIssues, self).generic_visit(node) if method_name == 'debug': self._process_debug(node) elif method_name in self.TRANS_HELPER_MAP: self._process_non_debug(node, method_name) return super(CheckForLoggingIssues, self).generic_visit(node) def _process_debug(self, node): msg = node.args[0] # first arg to a logging 
method is the msg # if first arg is a call to a i18n name if (isinstance(msg, ast.Call) and isinstance(msg.func, ast.Name) and msg.func.id in self.i18n_names): self.add_error(msg, message=self.DEBUG_CHECK_DESC) # if the first arg is a reference to a i18n call elif (isinstance(msg, ast.Name) and msg.id in self.assignments and not self._is_raised_later(node, msg.id)): self.add_error(msg, message=self.DEBUG_CHECK_DESC) def _process_non_debug(self, node, method_name): msg = node.args[0] # first arg to a logging method is the msg # if first arg is a call to a i18n name if isinstance(msg, ast.Call): try: func_name = msg.func.id except AttributeError: # in the case of logging only an exception, the msg function # will not have an id associated with it, for instance: # LOG.warning(six.text_type(e)) return # the function name is the correct translation helper # for the logging method if func_name == self.TRANS_HELPER_MAP[method_name]: return # the function name is an alias for the correct translation # helper for the loggine method if (self.i18n_names[func_name] == self.TRANS_HELPER_MAP[method_name]): return self.add_error(msg, message=self.NONDEBUG_CHECK_DESC) # if the first arg is not a reference to the correct i18n hint elif isinstance(msg, ast.Name): # FIXME(dstanek): to make sure more robust we should be checking # all names passed into a logging method. we can't right now # because: # 1. We have code like this that we'll fix when dealing with the %: # msg = _('....') % {} # LOG.warning(msg) # 2. We also do LOG.exception(e) in several places. I'm not sure # exactly what we should be doing about that. 
if msg.id not in self.assignments: return helper_method_name = self.TRANS_HELPER_MAP[method_name] if (self.assignments[msg.id] != helper_method_name and not self._is_raised_later(node, msg.id)): self.add_error(msg, message=self.NONDEBUG_CHECK_DESC) elif (self.assignments[msg.id] == helper_method_name and self._is_raised_later(node, msg.id)): self.add_error(msg, message=self.EXCESS_HELPER_CHECK_DESC) def _is_raised_later(self, node, name): def find_peers(node): node_for_line = node._parent for _field, value in ast.iter_fields(node._parent._parent): if isinstance(value, list) and node_for_line in value: return value[value.index(node_for_line) + 1:] continue return [] peers = find_peers(node) for peer in peers: if isinstance(peer, ast.Raise): if six.PY3: exc = peer.exc else: exc = peer.type if (isinstance(exc, ast.Call) and len(exc.args) > 0 and isinstance(exc.args[0], ast.Name) and name in (a.id for a in exc.args)): return True else: return False elif isinstance(peer, ast.Assign): if name in (t.id for t in peer.targets if hasattr(t, 'id')): return False def dict_constructor_with_sequence_copy(logical_line): """Should use a dict comprehension instead of a dict constructor. PEP-0274 introduced dict comprehension with performance enhancement and it also makes code more readable. 
Okay: lower_res = {k.lower(): v for k, v in six.iteritems(res[1])} Okay: fool = dict(a='a', b='b') K008: lower_res = dict((k.lower(), v) for k, v in six.iteritems(res[1])) K008: attrs = dict([(k, _from_json(v)) K008: dict([[i,i] for i in range(3)]) """ MESSAGE = ("K008 Must use a dict comprehension instead of a dict" " constructor with a sequence of key-value pairs.") dict_constructor_with_sequence_re = ( re.compile(r".*\bdict\((\[)?(\(|\[)(?!\{)")) if dict_constructor_with_sequence_re.match(logical_line): yield (0, MESSAGE) def factory(register): register(CheckForMutableDefaultArgs) register(block_comments_begin_with_a_space) register(CheckForAssertingNoneEquality) register(CheckForLoggingIssues) register(dict_constructor_with_sequence_copy) keystone-9.0.0/keystone/tests/common/0000775000567000056710000000000012701407246021020 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/common/auth.py0000664000567000056710000001115412701407102022324 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class AuthTestMixin(object): """To hold auth building helper functions.""" def _build_auth_scope(self, project_id=None, project_name=None, project_domain_id=None, project_domain_name=None, domain_id=None, domain_name=None, trust_id=None, unscoped=None): scope_data = {} if unscoped: scope_data['unscoped'] = {} if project_id or project_name: scope_data['project'] = {} if project_id: scope_data['project']['id'] = project_id else: scope_data['project']['name'] = project_name if project_domain_id or project_domain_name: project_domain_json = {} if project_domain_id: project_domain_json['id'] = project_domain_id else: project_domain_json['name'] = project_domain_name scope_data['project']['domain'] = project_domain_json if domain_id or domain_name: scope_data['domain'] = {} if domain_id: scope_data['domain']['id'] = domain_id else: scope_data['domain']['name'] = domain_name if trust_id: scope_data['OS-TRUST:trust'] = {} scope_data['OS-TRUST:trust']['id'] = trust_id return scope_data def _build_auth(self, user_id=None, username=None, user_domain_id=None, user_domain_name=None, **kwargs): # NOTE(dstanek): just to ensure sanity in the tests self.assertEqual(1, len(kwargs), message='_build_auth requires 1 (and only 1) ' 'secret type and value') secret_type, secret_value = list(kwargs.items())[0] # NOTE(dstanek): just to ensure sanity in the tests self.assertIn(secret_type, ('passcode', 'password'), message="_build_auth only supports 'passcode' " "and 'password' secret types") data = {'user': {}} if user_id: data['user']['id'] = user_id else: data['user']['name'] = username if user_domain_id or user_domain_name: data['user']['domain'] = {} if user_domain_id: data['user']['domain']['id'] = user_domain_id else: data['user']['domain']['name'] = user_domain_name data['user'][secret_type] = secret_value return data def _build_token_auth(self, token): return {'id': token} def build_authentication_request(self, token=None, user_id=None, username=None, user_domain_id=None, 
user_domain_name=None, password=None, kerberos=False, passcode=None, **kwargs): """Build auth dictionary. It will create an auth dictionary based on all the arguments that it receives. """ auth_data = {} auth_data['identity'] = {'methods': []} if kerberos: auth_data['identity']['methods'].append('kerberos') auth_data['identity']['kerberos'] = {} if token: auth_data['identity']['methods'].append('token') auth_data['identity']['token'] = self._build_token_auth(token) if password and (user_id or username): auth_data['identity']['methods'].append('password') auth_data['identity']['password'] = self._build_auth( user_id, username, user_domain_id, user_domain_name, password=password) if passcode and (user_id or username): auth_data['identity']['methods'].append('totp') auth_data['identity']['totp'] = self._build_auth( user_id, username, user_domain_id, user_domain_name, passcode=passcode) if kwargs: auth_data['scope'] = self._build_auth_scope(**kwargs) return {'auth': auth_data} keystone-9.0.0/keystone/tests/common/__init__.py0000664000567000056710000000000012701407102023106 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/0000775000567000056710000000000012701407246020507 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/test_backend_endpoint_policy_sql.py0000664000567000056710000000266412701407102027644 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import sql from keystone.tests.unit import test_backend_endpoint_policy from keystone.tests.unit import test_backend_sql class SqlPolicyAssociationTable(test_backend_sql.SqlModels): """Set of tests for checking SQL Policy Association Mapping.""" def test_policy_association_mapping(self): cols = (('id', sql.String, 64), ('policy_id', sql.String, 64), ('endpoint_id', sql.String, 64), ('service_id', sql.String, 64), ('region_id', sql.String, 64)) self.assertExpectedSchema('policy_association', cols) class SqlPolicyAssociationTests( test_backend_sql.SqlTests, test_backend_endpoint_policy.PolicyAssociationTests): def load_fixtures(self, fixtures): super(SqlPolicyAssociationTests, self).load_fixtures(fixtures) self.load_sample_data() keystone-9.0.0/keystone/tests/unit/assignment/0000775000567000056710000000000012701407246022657 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/assignment/__init__.py0000664000567000056710000000000012701407102024745 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/assignment/role_backends/0000775000567000056710000000000012701407246025452 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/assignment/role_backends/test_sql.py0000664000567000056710000001132712701407102027655 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from keystone.common import sql from keystone import exception from keystone.tests import unit from keystone.tests.unit.assignment import test_core from keystone.tests.unit.backend import core_sql class SqlRoleModels(core_sql.BaseBackendSqlModels): def test_role_model(self): cols = (('id', sql.String, 64), ('name', sql.String, 255), ('domain_id', sql.String, 64)) self.assertExpectedSchema('role', cols) class SqlRole(core_sql.BaseBackendSqlTests, test_core.RoleTests): def test_create_null_role_name(self): role = unit.new_role_ref(name=None) self.assertRaises(exception.UnexpectedError, self.role_api.create_role, role['id'], role) self.assertRaises(exception.RoleNotFound, self.role_api.get_role, role['id']) def test_create_duplicate_role_domain_specific_name_fails(self): domain = unit.new_domain_ref() role1 = unit.new_role_ref(domain_id=domain['id']) self.role_api.create_role(role1['id'], role1) role2 = unit.new_role_ref(name=role1['name'], domain_id=domain['id']) self.assertRaises(exception.Conflict, self.role_api.create_role, role2['id'], role2) def test_update_domain_id_of_role_fails(self): # Create a global role role1 = unit.new_role_ref() role1 = self.role_api.create_role(role1['id'], role1) # Try and update it to be domain specific domainA = unit.new_domain_ref() role1['domain_id'] = domainA['id'] self.assertRaises(exception.ValidationError, self.role_api.update_role, role1['id'], role1) # Create a domain specific role from scratch role2 = unit.new_role_ref(domain_id=domainA['id']) self.role_api.create_role(role2['id'], role2) # Try to "move" it to another domain domainB = unit.new_domain_ref() role2['domain_id'] = domainB['id'] self.assertRaises(exception.ValidationError, self.role_api.update_role, role2['id'], role2) # Now try to make it global role2['domain_id'] = None self.assertRaises(exception.ValidationError, self.role_api.update_role, role2['id'], role2) def test_domain_specific_separation(self): domain1 = unit.new_domain_ref() role1 = 
unit.new_role_ref(domain_id=domain1['id']) role_ref1 = self.role_api.create_role(role1['id'], role1) self.assertDictEqual(role1, role_ref1) # Check we can have the same named role in a different domain domain2 = unit.new_domain_ref() role2 = unit.new_role_ref(name=role1['name'], domain_id=domain2['id']) role_ref2 = self.role_api.create_role(role2['id'], role2) self.assertDictEqual(role2, role_ref2) # ...and in fact that you can have the same named role as a global role role3 = unit.new_role_ref(name=role1['name']) role_ref3 = self.role_api.create_role(role3['id'], role3) self.assertDictEqual(role3, role_ref3) # Check that updating one doesn't change the others role1['name'] = uuid.uuid4().hex self.role_api.update_role(role1['id'], role1) role_ref1 = self.role_api.get_role(role1['id']) self.assertDictEqual(role1, role_ref1) role_ref2 = self.role_api.get_role(role2['id']) self.assertDictEqual(role2, role_ref2) role_ref3 = self.role_api.get_role(role3['id']) self.assertDictEqual(role3, role_ref3) # Check that deleting one of these, doesn't affect the others self.role_api.delete_role(role1['id']) self.assertRaises(exception.RoleNotFound, self.role_api.get_role, role1['id']) self.role_api.get_role(role2['id']) self.role_api.get_role(role3['id']) keystone-9.0.0/keystone/tests/unit/assignment/role_backends/__init__.py0000664000567000056710000000000012701407102027540 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/assignment/test_core.py0000664000567000056710000001210012701407102025201 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures class RoleTests(object): def test_get_role_returns_not_found(self): self.assertRaises(exception.RoleNotFound, self.role_api.get_role, uuid.uuid4().hex) def test_create_duplicate_role_name_fails(self): role = unit.new_role_ref(id='fake1', name='fake1name') self.role_api.create_role('fake1', role) role['id'] = 'fake2' self.assertRaises(exception.Conflict, self.role_api.create_role, 'fake2', role) def test_rename_duplicate_role_name_fails(self): role1 = unit.new_role_ref(id='fake1', name='fake1name') role2 = unit.new_role_ref(id='fake2', name='fake2name') self.role_api.create_role('fake1', role1) self.role_api.create_role('fake2', role2) role1['name'] = 'fake2name' self.assertRaises(exception.Conflict, self.role_api.update_role, 'fake1', role1) def test_role_crud(self): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_ref = self.role_api.get_role(role['id']) role_ref_dict = {x: role_ref[x] for x in role_ref} self.assertDictEqual(role, role_ref_dict) role['name'] = uuid.uuid4().hex updated_role_ref = self.role_api.update_role(role['id'], role) role_ref = self.role_api.get_role(role['id']) role_ref_dict = {x: role_ref[x] for x in role_ref} self.assertDictEqual(role, role_ref_dict) self.assertDictEqual(role_ref_dict, updated_role_ref) self.role_api.delete_role(role['id']) self.assertRaises(exception.RoleNotFound, self.role_api.get_role, role['id']) def test_update_role_returns_not_found(self): role = 
unit.new_role_ref() self.assertRaises(exception.RoleNotFound, self.role_api.update_role, role['id'], role) def test_list_roles(self): roles = self.role_api.list_roles() self.assertEqual(len(default_fixtures.ROLES), len(roles)) role_ids = set(role['id'] for role in roles) expected_role_ids = set(role['id'] for role in default_fixtures.ROLES) self.assertEqual(expected_role_ids, role_ids) @unit.skip_if_cache_disabled('role') def test_cache_layer_role_crud(self): role = unit.new_role_ref() role_id = role['id'] # Create role self.role_api.create_role(role_id, role) role_ref = self.role_api.get_role(role_id) updated_role_ref = copy.deepcopy(role_ref) updated_role_ref['name'] = uuid.uuid4().hex # Update role, bypassing the role api manager self.role_api.driver.update_role(role_id, updated_role_ref) # Verify get_role still returns old ref self.assertDictEqual(role_ref, self.role_api.get_role(role_id)) # Invalidate Cache self.role_api.get_role.invalidate(self.role_api, role_id) # Verify get_role returns the new role_ref self.assertDictEqual(updated_role_ref, self.role_api.get_role(role_id)) # Update role back to original via the assignment api manager self.role_api.update_role(role_id, role_ref) # Verify get_role returns the original role ref self.assertDictEqual(role_ref, self.role_api.get_role(role_id)) # Delete role bypassing the role api manager self.role_api.driver.delete_role(role_id) # Verify get_role still returns the role_ref self.assertDictEqual(role_ref, self.role_api.get_role(role_id)) # Invalidate cache self.role_api.get_role.invalidate(self.role_api, role_id) # Verify RoleNotFound is now raised self.assertRaises(exception.RoleNotFound, self.role_api.get_role, role_id) # recreate role self.role_api.create_role(role_id, role) self.role_api.get_role(role_id) # delete role via the assignment api manager self.role_api.delete_role(role_id) # verity RoleNotFound is now raised self.assertRaises(exception.RoleNotFound, self.role_api.get_role, role_id) 
keystone-9.0.0/keystone/tests/unit/assignment/test_backends.py0000664000567000056710000055162712701407102026051 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import mock from oslo_config import cfg from six.moves import range from testtools import matchers from keystone import exception from keystone.tests import unit CONF = cfg.CONF class AssignmentTestHelperMixin(object): """Mixin class to aid testing of assignments. This class supports data driven test plans that enable: - Creation of initial entities, such as domains, users, groups, projects and roles - Creation of assignments referencing the above entities - A set of input parameters and expected outputs to list_role_assignments based on the above test data A test plan is a dict of the form: test_plan = { entities: details and number of entities, group_memberships: group-user entity memberships, assignments: list of assignments to create, tests: list of pairs of input params and expected outputs} An example test plan: test_plan = { # First, create the entities required. Entities are specified by # a dict with the key being the entity type and the value an # entity specification which can be one of: # # - a simple number, e.g. {'users': 3} creates 3 users # - a dict where more information regarding the contents of the entity # is required, e.g. 
{'domains' : {'users : 3}} creates a domain # with three users # - a list of entity specifications if multiple are required # # The following creates a domain that contains a single user, group and # project, as well as creating three roles. 'entities': {'domains': {'users': 1, 'groups': 1, 'projects': 1}, 'roles': 3}, # If it is required that an existing domain be used for the new # entities, then the id of that domain can be included in the # domain dict. For example, if alternatively we wanted to add 3 users # to the default domain, add a second domain containing 3 projects as # well as 5 additional empty domains, the entities would be defined as: # # 'entities': {'domains': [{'id': DEFAULT_DOMAIN, 'users': 3}, # {'projects': 3}, 5]}, # # A project hierarchy can be specified within the 'projects' section by # nesting the 'project' key, for example to create a project with three # sub-projects you would use: 'projects': {'project': 3} # A more complex hierarchy can also be defined, for example the # following would define three projects each containing a # sub-project, each of which contain a further three sub-projects. 'projects': [{'project': {'project': 3}}, {'project': {'project': 3}}, {'project': {'project': 3}}] # If the 'roles' entity count is defined as top level key in 'entities' # dict then these are global roles. If it is placed within the # 'domain' dict, then they will be domain specific roles. A mix of # domain specific and global roles are allowed, with the role index # being calculated in the order they are defined in the 'entities' # dict. # A set of implied role specifications. In this case, prior role # index 0 implies role index 1, and role 1 implies roles 2 and 3. 'roles': [{'role': 0, 'implied_roles': [1]}, {'role': 1, 'implied_roles': [2, 3]}] # A list of groups and their members. In this case make users with # index 0 and 1 members of group with index 0. Users and Groups are # indexed in the order they appear in the 'entities' key above. 
'group_memberships': [{'group': 0, 'users': [0, 1]}] # Next, create assignments between the entities, referencing the # entities by index, i.e. 'user': 0 refers to user[0]. Entities are # indexed in the order they appear in the 'entities' key above within # their entity type. 'assignments': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, {'group': 0, 'role': 2, 'domain': 0}, {'user': 0, 'role': 2, 'project': 0}], # Finally, define an array of tests where list_role_assignment() is # called with the given input parameters and the results are then # confirmed to be as given in 'results'. Again, all entities are # referenced by index. 'tests': [ {'params': {}, 'results': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, {'group': 0, 'role': 2, 'domain': 0}, {'user': 0, 'role': 2, 'project': 0}]}, {'params': {'role': 2}, 'results': [{'group': 0, 'role': 2, 'domain': 0}, {'user': 0, 'role': 2, 'project': 0}]}] # The 'params' key also supports the 'effective', # 'inherited_to_projects' and 'source_from_group_ids' options to # list_role_assignments.} """ def _handle_project_spec(self, test_data, domain_id, project_spec, parent_id=None): """Handle the creation of a project or hierarchy of projects. project_spec may either be a count of the number of projects to create, or it may be a list of the form: [{'project': project_spec}, {'project': project_spec}, ...] This method is called recursively to handle the creation of a hierarchy of projects. 
""" def _create_project(domain_id, parent_id): new_project = unit.new_project_ref(domain_id=domain_id, parent_id=parent_id) new_project = self.resource_api.create_project(new_project['id'], new_project) return new_project if isinstance(project_spec, list): for this_spec in project_spec: self._handle_project_spec( test_data, domain_id, this_spec, parent_id=parent_id) elif isinstance(project_spec, dict): new_proj = _create_project(domain_id, parent_id) test_data['projects'].append(new_proj) self._handle_project_spec( test_data, domain_id, project_spec['project'], parent_id=new_proj['id']) else: for _ in range(project_spec): test_data['projects'].append( _create_project(domain_id, parent_id)) def _create_role(self, domain_id=None): new_role = unit.new_role_ref(domain_id=domain_id) return self.role_api.create_role(new_role['id'], new_role) def _handle_domain_spec(self, test_data, domain_spec): """Handle the creation of domains and their contents. domain_spec may either be a count of the number of empty domains to create, a dict describing the domain contents, or a list of domain_specs. In the case when a list is provided, this method calls itself recursively to handle the list elements. 
This method will insert any entities created into test_data """ def _create_domain(domain_id=None): if domain_id is None: new_domain = unit.new_domain_ref() self.resource_api.create_domain(new_domain['id'], new_domain) return new_domain else: # The test plan specified an existing domain to use return self.resource_api.get_domain(domain_id) def _create_entity_in_domain(entity_type, domain_id): """Create a user or group entity in the domain.""" if entity_type == 'users': new_entity = unit.new_user_ref(domain_id=domain_id) new_entity = self.identity_api.create_user(new_entity) elif entity_type == 'groups': new_entity = unit.new_group_ref(domain_id=domain_id) new_entity = self.identity_api.create_group(new_entity) elif entity_type == 'roles': new_entity = self._create_role(domain_id=domain_id) else: # Must be a bad test plan raise exception.NotImplemented() return new_entity if isinstance(domain_spec, list): for x in domain_spec: self._handle_domain_spec(test_data, x) elif isinstance(domain_spec, dict): # If there is a domain ID specified, then use it the_domain = _create_domain(domain_spec.get('id')) test_data['domains'].append(the_domain) for entity_type, value in domain_spec.items(): if entity_type == 'id': # We already used this above to determine whether to # use and existing domain continue if entity_type == 'projects': # If it's projects, we need to handle the potential # specification of a project hierarchy self._handle_project_spec( test_data, the_domain['id'], value) else: # It's a count of number of entities for _ in range(value): test_data[entity_type].append( _create_entity_in_domain( entity_type, the_domain['id'])) else: for _ in range(domain_spec): test_data['domains'].append(_create_domain()) def create_entities(self, entity_pattern): """Create the entities specified in the test plan. Process the 'entities' key in the test plan, creating the requested entities. 
Each created entity will be added to the array of entities stored in the returned test_data object, e.g.: test_data['users'] = [user[0], user[1]....] """ test_data = {} for entity in ['users', 'groups', 'domains', 'projects', 'roles']: test_data[entity] = [] # Create any domains requested and, if specified, any entities within # those domains if 'domains' in entity_pattern: self._handle_domain_spec(test_data, entity_pattern['domains']) # Create any roles requested if 'roles' in entity_pattern: for _ in range(entity_pattern['roles']): test_data['roles'].append(self._create_role()) return test_data def _convert_entity_shorthand(self, key, shorthand_data, reference_data): """Convert a shorthand entity description into a full ID reference. In test plan definitions, we allow a shorthand for referencing to an entity of the form: 'user': 0 which is actually shorthand for: 'user_id': reference_data['users'][0]['id'] This method converts the shorthand version into the full reference. """ expanded_key = '%s_id' % key reference_index = '%ss' % key index_value = ( reference_data[reference_index][shorthand_data[key]]['id']) return expanded_key, index_value def create_implied_roles(self, implied_pattern, test_data): """Create the implied roles specified in the test plan.""" for implied_spec in implied_pattern: # Each implied role specification is a dict of the form: # # {'role': 0, 'implied_roles': list of roles} prior_role = test_data['roles'][implied_spec['role']]['id'] if isinstance(implied_spec['implied_roles'], list): for this_role in implied_spec['implied_roles']: implied_role = test_data['roles'][this_role]['id'] self.role_api.create_implied_role(prior_role, implied_role) else: implied_role = ( test_data['roles'][implied_spec['implied_roles']]['id']) self.role_api.create_implied_role(prior_role, implied_role) def create_group_memberships(self, group_pattern, test_data): """Create the group memberships specified in the test plan.""" for group_spec in group_pattern: # Each 
membership specification is a dict of the form: # # {'group': 0, 'users': [list of user indexes]} # # Add all users in the list to the specified group, first # converting from index to full entity ID. group_value = test_data['groups'][group_spec['group']]['id'] for user_index in group_spec['users']: user_value = test_data['users'][user_index]['id'] self.identity_api.add_user_to_group(user_value, group_value) return test_data def create_assignments(self, assignment_pattern, test_data): """Create the assignments specified in the test plan.""" # First store how many assignments are already in the system, # so during the tests we can check the number of new assignments # created. test_data['initial_assignment_count'] = ( len(self.assignment_api.list_role_assignments())) # Now create the new assignments in the test plan for assignment in assignment_pattern: # Each assignment is a dict of the form: # # { 'user': 0, 'project':1, 'role': 6} # # where the value of each item is the index into the array of # entities created earlier. # # We process the assignment dict to create the args required to # make the create_grant() call. args = {} for param in assignment: if param == 'inherited_to_projects': args[param] = assignment[param] else: # Turn 'entity : 0' into 'entity_id = ac6736ba873d' # where entity in user, group, project or domain key, value = self._convert_entity_shorthand( param, assignment, test_data) args[key] = value self.assignment_api.create_grant(**args) return test_data def execute_assignment_cases(self, test_plan, test_data): """Execute the test plan, based on the created test_data.""" def check_results(expected, actual, param_arg_count): if param_arg_count == 0: # It was an unfiltered call, so default fixture assignments # might be polluting our answer - so we take into account # how many assignments there were before the test. 
self.assertEqual( len(expected) + test_data['initial_assignment_count'], len(actual)) else: self.assertThat(actual, matchers.HasLength(len(expected))) for each_expected in expected: expected_assignment = {} for param in each_expected: if param == 'inherited_to_projects': expected_assignment[param] = each_expected[param] elif param == 'indirect': # We're expecting the result to contain an indirect # dict with the details how the role came to be placed # on this entity - so convert the key/value pairs of # that dict into real entity references. indirect_term = {} for indirect_param in each_expected[param]: key, value = self._convert_entity_shorthand( indirect_param, each_expected[param], test_data) indirect_term[key] = value expected_assignment[param] = indirect_term else: # Convert a simple shorthand entry into a full # entity reference key, value = self._convert_entity_shorthand( param, each_expected, test_data) expected_assignment[key] = value self.assertIn(expected_assignment, actual) def convert_group_ids_sourced_from_list(index_list, reference_data): value_list = [] for group_index in index_list: value_list.append( reference_data['groups'][group_index]['id']) return value_list # Go through each test in the array, processing the input params, which # we build into an args dict, and then call list_role_assignments. Then # check the results against those specified in the test plan. 
for test in test_plan.get('tests', []): args = {} for param in test['params']: if param in ['effective', 'inherited', 'include_subtree']: # Just pass the value into the args args[param] = test['params'][param] elif param == 'source_from_group_ids': # Convert the list of indexes into a list of IDs args[param] = convert_group_ids_sourced_from_list( test['params']['source_from_group_ids'], test_data) else: # Turn 'entity : 0' into 'entity_id = ac6736ba873d' # where entity in user, group, project or domain key, value = self._convert_entity_shorthand( param, test['params'], test_data) args[key] = value results = self.assignment_api.list_role_assignments(**args) check_results(test['results'], results, len(args)) def execute_assignment_plan(self, test_plan): """Create entities, assignments and execute the test plan. The standard method to call to create entities and assignments and execute the tests as specified in the test_plan. The test_data dict is returned so that, if required, the caller can execute additional manual tests with the entities and assignments created. 
""" test_data = self.create_entities(test_plan['entities']) if 'implied_roles' in test_plan: self.create_implied_roles(test_plan['implied_roles'], test_data) if 'group_memberships' in test_plan: self.create_group_memberships(test_plan['group_memberships'], test_data) if 'assignments' in test_plan: test_data = self.create_assignments(test_plan['assignments'], test_data) self.execute_assignment_cases(test_plan, test_data) return test_data class AssignmentTests(AssignmentTestHelperMixin): def _get_domain_fixture(self): domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) return domain def test_project_add_and_remove_user_role(self): user_ids = self.assignment_api.list_user_ids_for_project( self.tenant_bar['id']) self.assertNotIn(self.user_two['id'], user_ids) self.assignment_api.add_role_to_user_and_project( tenant_id=self.tenant_bar['id'], user_id=self.user_two['id'], role_id=self.role_other['id']) user_ids = self.assignment_api.list_user_ids_for_project( self.tenant_bar['id']) self.assertIn(self.user_two['id'], user_ids) self.assignment_api.remove_role_from_user_and_project( tenant_id=self.tenant_bar['id'], user_id=self.user_two['id'], role_id=self.role_other['id']) user_ids = self.assignment_api.list_user_ids_for_project( self.tenant_bar['id']) self.assertNotIn(self.user_two['id'], user_ids) def test_remove_user_role_not_assigned(self): # Expect failure if attempt to remove a role that was never assigned to # the user. self.assertRaises(exception.RoleNotFound, self.assignment_api. 
remove_role_from_user_and_project, tenant_id=self.tenant_bar['id'], user_id=self.user_two['id'], role_id=self.role_other['id']) def test_list_user_ids_for_project(self): user_ids = self.assignment_api.list_user_ids_for_project( self.tenant_baz['id']) self.assertEqual(2, len(user_ids)) self.assertIn(self.user_two['id'], user_ids) self.assertIn(self.user_badguy['id'], user_ids) def test_list_user_ids_for_project_no_duplicates(self): # Create user user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_ref = self.identity_api.create_user(user_ref) # Create project project_ref = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project( project_ref['id'], project_ref) # Create 2 roles and give user each role in project for i in range(2): role_ref = unit.new_role_ref() self.role_api.create_role(role_ref['id'], role_ref) self.assignment_api.add_role_to_user_and_project( user_id=user_ref['id'], tenant_id=project_ref['id'], role_id=role_ref['id']) # Get the list of user_ids in project user_ids = self.assignment_api.list_user_ids_for_project( project_ref['id']) # Ensure the user is only returned once self.assertEqual(1, len(user_ids)) def test_get_project_user_ids_returns_not_found(self): self.assertRaises(exception.ProjectNotFound, self.assignment_api.list_user_ids_for_project, uuid.uuid4().hex) def test_list_role_assignments_unfiltered(self): """Test unfiltered listing of role assignments.""" test_plan = { # Create a domain, with a user, group & project 'entities': {'domains': {'users': 1, 'groups': 1, 'projects': 1}, 'roles': 3}, # Create a grant of each type (user/group on project/domain) 'assignments': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, {'group': 0, 'role': 2, 'domain': 0}, {'group': 0, 'role': 2, 'project': 0}], 'tests': [ # Check that we get back the 4 assignments {'params': {}, 'results': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, 
{'group': 0, 'role': 2, 'domain': 0}, {'group': 0, 'role': 2, 'project': 0}]} ] } self.execute_assignment_plan(test_plan) def test_list_role_assignments_filtered_by_role(self): """Test listing of role assignments filtered by role ID.""" test_plan = { # Create a user, group & project in the default domain 'entities': {'domains': {'id': CONF.identity.default_domain_id, 'users': 1, 'groups': 1, 'projects': 1}, 'roles': 3}, # Create a grant of each type (user/group on project/domain) 'assignments': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, {'group': 0, 'role': 2, 'domain': 0}, {'group': 0, 'role': 2, 'project': 0}], 'tests': [ # Check that when filtering by role, we only get back those # that match {'params': {'role': 2}, 'results': [{'group': 0, 'role': 2, 'domain': 0}, {'group': 0, 'role': 2, 'project': 0}]} ] } self.execute_assignment_plan(test_plan) def test_list_group_role_assignment(self): # When a group role assignment is created and the role assignments are # listed then the group role assignment is included in the list. 
test_plan = { 'entities': {'domains': {'id': CONF.identity.default_domain_id, 'groups': 1, 'projects': 1}, 'roles': 1}, 'assignments': [{'group': 0, 'role': 0, 'project': 0}], 'tests': [ {'params': {}, 'results': [{'group': 0, 'role': 0, 'project': 0}]} ] } self.execute_assignment_plan(test_plan) def test_list_role_assignments_bad_role(self): assignment_list = self.assignment_api.list_role_assignments( role_id=uuid.uuid4().hex) self.assertEqual([], assignment_list) def test_add_duplicate_role_grant(self): roles_ref = self.assignment_api.get_roles_for_user_and_project( self.user_foo['id'], self.tenant_bar['id']) self.assertNotIn(self.role_admin['id'], roles_ref) self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_bar['id'], self.role_admin['id']) self.assertRaises(exception.Conflict, self.assignment_api.add_role_to_user_and_project, self.user_foo['id'], self.tenant_bar['id'], self.role_admin['id']) def test_get_role_by_user_and_project_with_user_in_group(self): """Test for get role by user and project, user was added into a group. 
        Test Plan:

        - Create a user, a project & a group, add this user to group
        - Create roles and grant them to user and project
        - Check the role list get by the user and project was as expected

        """
        user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user_ref = self.identity_api.create_user(user_ref)
        project_ref = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id)
        self.resource_api.create_project(project_ref['id'], project_ref)
        group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        group_id = self.identity_api.create_group(group)['id']
        self.identity_api.add_user_to_group(user_ref['id'], group_id)

        # Create two roles and assign both directly to the user on the project
        role_ref_list = []
        for i in range(2):
            role_ref = unit.new_role_ref()
            self.role_api.create_role(role_ref['id'], role_ref)
            role_ref_list.append(role_ref)
            self.assignment_api.add_role_to_user_and_project(
                user_id=user_ref['id'],
                tenant_id=project_ref['id'],
                role_id=role_ref['id'])
        role_list = self.assignment_api.get_roles_for_user_and_project(
            user_ref['id'], project_ref['id'])

        # The effective role list must contain exactly the two created roles
        self.assertEqual(set([r['id'] for r in role_ref_list]),
                         set(role_list))

    def test_get_role_by_user_and_project(self):
        """Adding roles one by one is reflected in the effective role list."""
        roles_ref = self.assignment_api.get_roles_for_user_and_project(
            self.user_foo['id'], self.tenant_bar['id'])
        self.assertNotIn(self.role_admin['id'], roles_ref)

        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'], self.tenant_bar['id'], self.role_admin['id'])
        roles_ref = self.assignment_api.get_roles_for_user_and_project(
            self.user_foo['id'], self.tenant_bar['id'])
        self.assertIn(self.role_admin['id'], roles_ref)
        self.assertNotIn('member', roles_ref)

        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'], self.tenant_bar['id'], 'member')
        roles_ref = self.assignment_api.get_roles_for_user_and_project(
            self.user_foo['id'], self.tenant_bar['id'])
        self.assertIn(self.role_admin['id'], roles_ref)
        self.assertIn('member', roles_ref)

    def test_get_roles_for_user_and_domain(self):
        """Test for getting roles for user on a domain.

        Test Plan:

        - Create a domain, with 2 users
        - Check no roles yet exit
        - Give user1 two roles on the domain, user2 one role
        - Get roles on user1 and the domain - maybe sure we only get back the
          2 roles on user1
        - Delete both roles from user1
        - Check we get no roles back for user1 on domain

        """
        new_domain = unit.new_domain_ref()
        self.resource_api.create_domain(new_domain['id'], new_domain)
        new_user1 = unit.new_user_ref(domain_id=new_domain['id'])
        new_user1 = self.identity_api.create_user(new_user1)
        new_user2 = unit.new_user_ref(domain_id=new_domain['id'])
        new_user2 = self.identity_api.create_user(new_user2)
        roles_ref = self.assignment_api.list_grants(
            user_id=new_user1['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))
        # Now create the grants (roles are defined in default_fixtures)
        self.assignment_api.create_grant(user_id=new_user1['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        self.assignment_api.create_grant(user_id=new_user1['id'],
                                         domain_id=new_domain['id'],
                                         role_id='other')
        self.assignment_api.create_grant(user_id=new_user2['id'],
                                         domain_id=new_domain['id'],
                                         role_id='admin')
        # Read back the roles for user1 on domain
        roles_ids = self.assignment_api.get_roles_for_user_and_domain(
            new_user1['id'], new_domain['id'])
        self.assertEqual(2, len(roles_ids))
        self.assertIn(self.role_member['id'], roles_ids)
        self.assertIn(self.role_other['id'], roles_ids)
        # Now delete both grants for user1
        self.assignment_api.delete_grant(user_id=new_user1['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        self.assignment_api.delete_grant(user_id=new_user1['id'],
                                         domain_id=new_domain['id'],
                                         role_id='other')
        roles_ref = self.assignment_api.list_grants(
            user_id=new_user1['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))

    def test_get_roles_for_user_and_domain_returns_not_found(self):
        """Test errors raised when getting roles for user on a domain.

        Test Plan:

        - Check non-existing user gives UserNotFound
        - Check non-existing domain gives DomainNotFound

        """
        new_domain = self._get_domain_fixture()
        new_user1 = unit.new_user_ref(domain_id=new_domain['id'])
        new_user1 = self.identity_api.create_user(new_user1)

        self.assertRaises(exception.UserNotFound,
                          self.assignment_api.get_roles_for_user_and_domain,
                          uuid.uuid4().hex,
                          new_domain['id'])

        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.get_roles_for_user_and_domain,
                          new_user1['id'],
                          uuid.uuid4().hex)

    def test_get_roles_for_user_and_project_returns_not_found(self):
        """Unknown user or project raises the corresponding NotFound."""
        self.assertRaises(exception.UserNotFound,
                          self.assignment_api.get_roles_for_user_and_project,
                          uuid.uuid4().hex,
                          self.tenant_bar['id'])

        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.get_roles_for_user_and_project,
                          self.user_foo['id'],
                          uuid.uuid4().hex)

    def test_add_role_to_user_and_project_returns_not_found(self):
        """Unknown project or role raises the corresponding NotFound."""
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.add_role_to_user_and_project,
                          self.user_foo['id'],
                          uuid.uuid4().hex,
                          self.role_admin['id'])

        self.assertRaises(exception.RoleNotFound,
                          self.assignment_api.add_role_to_user_and_project,
                          self.user_foo['id'],
                          self.tenant_bar['id'],
                          uuid.uuid4().hex)

    def test_add_role_to_user_and_project_no_user(self):
        # If add_role_to_user_and_project and the user doesn't exist, then
        # no error.
        user_id_not_exist = uuid.uuid4().hex
        self.assignment_api.add_role_to_user_and_project(
            user_id_not_exist,
            self.tenant_bar['id'],
            self.role_admin['id'])

    def test_remove_role_from_user_and_project(self):
        """Removing a role drops it from the effective list; again raises."""
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'], self.tenant_bar['id'], 'member')
        self.assignment_api.remove_role_from_user_and_project(
            self.user_foo['id'], self.tenant_bar['id'], 'member')
        roles_ref = self.assignment_api.get_roles_for_user_and_project(
            self.user_foo['id'], self.tenant_bar['id'])
        self.assertNotIn('member', roles_ref)
        # Removing it a second time must fail
        self.assertRaises(exception.NotFound,
                          self.assignment_api.
                          remove_role_from_user_and_project,
                          self.user_foo['id'],
                          self.tenant_bar['id'],
                          'member')

    def test_get_role_grant_by_user_and_project(self):
        """Grants created on a project appear in list_grants output."""
        roles_ref = self.assignment_api.list_grants(
            user_id=self.user_foo['id'],
            project_id=self.tenant_bar['id'])
        self.assertEqual(1, len(roles_ref))
        self.assignment_api.create_grant(user_id=self.user_foo['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_admin['id'])
        roles_ref = self.assignment_api.list_grants(
            user_id=self.user_foo['id'],
            project_id=self.tenant_bar['id'])
        self.assertIn(self.role_admin['id'],
                      [role_ref['id'] for role_ref in roles_ref])

        self.assignment_api.create_grant(user_id=self.user_foo['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            user_id=self.user_foo['id'],
            project_id=self.tenant_bar['id'])

        roles_ref_ids = []
        for ref in roles_ref:
            roles_ref_ids.append(ref['id'])
        self.assertIn(self.role_admin['id'], roles_ref_ids)
        self.assertIn('member', roles_ref_ids)

    def test_remove_role_grant_from_user_and_project(self):
        """Deleted grants disappear; deleting again raises NotFound."""
        self.assignment_api.create_grant(user_id=self.user_foo['id'],
                                         project_id=self.tenant_baz['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            user_id=self.user_foo['id'],
            project_id=self.tenant_baz['id'])
        self.assertDictEqual(self.role_member, roles_ref[0])

        self.assignment_api.delete_grant(user_id=self.user_foo['id'],
                                         project_id=self.tenant_baz['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            user_id=self.user_foo['id'],
            project_id=self.tenant_baz['id'])
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.delete_grant,
                          user_id=self.user_foo['id'],
                          project_id=self.tenant_baz['id'],
                          role_id='member')

    def test_get_role_assignment_by_project_not_found(self):
        """check_grant_role_id raises for ungranted user/group on project."""
        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.check_grant_role_id,
                          user_id=self.user_foo['id'],
                          project_id=self.tenant_baz['id'],
                          role_id='member')

        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.check_grant_role_id,
                          group_id=uuid.uuid4().hex,
                          project_id=self.tenant_baz['id'],
                          role_id='member')

    def test_get_role_assignment_by_domain_not_found(self):
        """check_grant_role_id raises for ungranted user/group on domain."""
        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.check_grant_role_id,
                          user_id=self.user_foo['id'],
                          domain_id=self.domain_default['id'],
                          role_id='member')

        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.check_grant_role_id,
                          group_id=uuid.uuid4().hex,
                          domain_id=self.domain_default['id'],
                          role_id='member')

    def test_del_role_assignment_by_project_not_found(self):
        """delete_grant raises for a non-existent project assignment."""
        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.delete_grant,
                          user_id=self.user_foo['id'],
                          project_id=self.tenant_baz['id'],
                          role_id='member')

        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.delete_grant,
                          group_id=uuid.uuid4().hex,
                          project_id=self.tenant_baz['id'],
                          role_id='member')

    def test_del_role_assignment_by_domain_not_found(self):
        """delete_grant raises for a non-existent domain assignment."""
        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.delete_grant,
                          user_id=self.user_foo['id'],
                          domain_id=self.domain_default['id'],
                          role_id='member')

        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.delete_grant,
                          group_id=uuid.uuid4().hex,
                          domain_id=self.domain_default['id'],
                          role_id='member')

    def test_get_and_remove_role_grant_by_group_and_project(self):
        """Group grant on a project can be created, read and deleted."""
        new_domain = unit.new_domain_ref()
        self.resource_api.create_domain(new_domain['id'], new_domain)
        new_group = unit.new_group_ref(domain_id=new_domain['id'])
        new_group = self.identity_api.create_group(new_group)
        new_user = unit.new_user_ref(domain_id=new_domain['id'])
        new_user = self.identity_api.create_user(new_user)
        self.identity_api.add_user_to_group(new_user['id'],
                                            new_group['id'])
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            project_id=self.tenant_bar['id'])
        self.assertEqual(0, len(roles_ref))
        self.assignment_api.create_grant(group_id=new_group['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            project_id=self.tenant_bar['id'])
        self.assertDictEqual(self.role_member, roles_ref[0])

        self.assignment_api.delete_grant(group_id=new_group['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            project_id=self.tenant_bar['id'])
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.delete_grant,
                          group_id=new_group['id'],
                          project_id=self.tenant_bar['id'],
                          role_id='member')

    def test_get_and_remove_role_grant_by_group_and_domain(self):
        """Group grant on a domain can be created, read and deleted."""
        new_domain = unit.new_domain_ref()
        self.resource_api.create_domain(new_domain['id'], new_domain)
        new_group = unit.new_group_ref(domain_id=new_domain['id'])
        new_group = self.identity_api.create_group(new_group)
        new_user = unit.new_user_ref(domain_id=new_domain['id'])
        new_user = self.identity_api.create_user(new_user)
        self.identity_api.add_user_to_group(new_user['id'],
                                            new_group['id'])

        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))

        self.assignment_api.create_grant(group_id=new_group['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertDictEqual(self.role_member, roles_ref[0])

        self.assignment_api.delete_grant(group_id=new_group['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.delete_grant,
                          group_id=new_group['id'],
                          domain_id=new_domain['id'],
                          role_id='member')

    def test_get_and_remove_correct_role_grant_from_a_mix(self):
        """Only the targeted grant is returned/removed among many others."""
        new_domain = unit.new_domain_ref()
        self.resource_api.create_domain(new_domain['id'], new_domain)
        new_project = unit.new_project_ref(domain_id=new_domain['id'])
        self.resource_api.create_project(new_project['id'], new_project)
        new_group = unit.new_group_ref(domain_id=new_domain['id'])
        new_group = self.identity_api.create_group(new_group)
        new_group2 = unit.new_group_ref(domain_id=new_domain['id'])
        new_group2 = self.identity_api.create_group(new_group2)
        new_user = unit.new_user_ref(domain_id=new_domain['id'])
        new_user = self.identity_api.create_user(new_user)
        new_user2 = unit.new_user_ref(domain_id=new_domain['id'])
        new_user2 = self.identity_api.create_user(new_user2)
        self.identity_api.add_user_to_group(new_user['id'],
                                            new_group['id'])
        # First check we have no grants
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))
        # Now add the grant we are going to test for, and some others as
        # well just to make sure we get back the right one
        self.assignment_api.create_grant(group_id=new_group['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        self.assignment_api.create_grant(group_id=new_group2['id'],
                                         domain_id=new_domain['id'],
                                         role_id=self.role_admin['id'])
        self.assignment_api.create_grant(user_id=new_user2['id'],
                                         domain_id=new_domain['id'],
                                         role_id=self.role_admin['id'])
        self.assignment_api.create_grant(group_id=new_group['id'],
                                         project_id=new_project['id'],
                                         role_id=self.role_admin['id'])

        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertDictEqual(self.role_member, roles_ref[0])

        self.assignment_api.delete_grant(group_id=new_group['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.delete_grant,
                          group_id=new_group['id'],
                          domain_id=new_domain['id'],
                          role_id='member')

    def test_get_and_remove_role_grant_by_user_and_domain(self):
        """User grant on a domain can be created, read and deleted."""
        new_domain = unit.new_domain_ref()
        self.resource_api.create_domain(new_domain['id'], new_domain)
        new_user = unit.new_user_ref(domain_id=new_domain['id'])
        new_user = self.identity_api.create_user(new_user)
        roles_ref = self.assignment_api.list_grants(
            user_id=new_user['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))

        self.assignment_api.create_grant(user_id=new_user['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            user_id=new_user['id'],
            domain_id=new_domain['id'])
        self.assertDictEqual(self.role_member, roles_ref[0])

        self.assignment_api.delete_grant(user_id=new_user['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            user_id=new_user['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.delete_grant,
                          user_id=new_user['id'],
                          domain_id=new_domain['id'],
                          role_id='member')

    def test_get_and_remove_role_grant_by_group_and_cross_domain(self):
        """Group grants on two different domains are kept independent."""
        group1_domain1_role = unit.new_role_ref()
        self.role_api.create_role(group1_domain1_role['id'],
                                  group1_domain1_role)
        group1_domain2_role = unit.new_role_ref()
        self.role_api.create_role(group1_domain2_role['id'],
                                  group1_domain2_role)
        domain1 = unit.new_domain_ref()
        self.resource_api.create_domain(domain1['id'], domain1)
        domain2 = unit.new_domain_ref()
        self.resource_api.create_domain(domain2['id'], domain2)
        group1 = unit.new_group_ref(domain_id=domain1['id'])
        group1 = self.identity_api.create_group(group1)

        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            domain_id=domain1['id'])
        self.assertEqual(0, len(roles_ref))
        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            domain_id=domain2['id'])
        self.assertEqual(0, len(roles_ref))

        self.assignment_api.create_grant(group_id=group1['id'],
                                         domain_id=domain1['id'],
                                         role_id=group1_domain1_role['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         domain_id=domain2['id'],
                                         role_id=group1_domain2_role['id'])

        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            domain_id=domain1['id'])
        self.assertDictEqual(group1_domain1_role, roles_ref[0])
        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            domain_id=domain2['id'])
        self.assertDictEqual(group1_domain2_role, roles_ref[0])

        self.assignment_api.delete_grant(group_id=group1['id'],
                                         domain_id=domain2['id'],
                                         role_id=group1_domain2_role['id'])
        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            domain_id=domain2['id'])
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.delete_grant,
                          group_id=group1['id'],
                          domain_id=domain2['id'],
                          role_id=group1_domain2_role['id'])

    def test_get_and_remove_role_grant_by_user_and_cross_domain(self):
        """User grants on two different domains are kept independent."""
        user1_domain1_role = unit.new_role_ref()
        self.role_api.create_role(user1_domain1_role['id'], user1_domain1_role)
        user1_domain2_role = unit.new_role_ref()
        self.role_api.create_role(user1_domain2_role['id'], user1_domain2_role)
        domain1 = unit.new_domain_ref()
        self.resource_api.create_domain(domain1['id'], domain1)
        domain2 = unit.new_domain_ref()
        self.resource_api.create_domain(domain2['id'], domain2)
        user1 = unit.new_user_ref(domain_id=domain1['id'])
        user1 = self.identity_api.create_user(user1)

        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            domain_id=domain1['id'])
        self.assertEqual(0, len(roles_ref))
        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            domain_id=domain2['id'])
        self.assertEqual(0, len(roles_ref))

        self.assignment_api.create_grant(user_id=user1['id'],
                                         domain_id=domain1['id'],
                                         role_id=user1_domain1_role['id'])
        self.assignment_api.create_grant(user_id=user1['id'],
                                         domain_id=domain2['id'],
                                         role_id=user1_domain2_role['id'])

        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            domain_id=domain1['id'])
        self.assertDictEqual(user1_domain1_role, roles_ref[0])
        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            domain_id=domain2['id'])
        self.assertDictEqual(user1_domain2_role, roles_ref[0])

        self.assignment_api.delete_grant(user_id=user1['id'],
                                         domain_id=domain2['id'],
                                         role_id=user1_domain2_role['id'])
        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            domain_id=domain2['id'])
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(exception.RoleAssignmentNotFound,
                          self.assignment_api.delete_grant,
                          user_id=user1['id'],
                          domain_id=domain2['id'],
                          role_id=user1_domain2_role['id'])

    def test_role_grant_by_group_and_cross_domain_project(self):
        """Group in one domain can hold roles on a project in another."""
        role1 = unit.new_role_ref()
        self.role_api.create_role(role1['id'], role1)
        role2 = unit.new_role_ref()
        self.role_api.create_role(role2['id'], role2)
        domain1 = unit.new_domain_ref()
        self.resource_api.create_domain(domain1['id'], domain1)
        domain2 = unit.new_domain_ref()
        self.resource_api.create_domain(domain2['id'], domain2)
        group1 = unit.new_group_ref(domain_id=domain1['id'])
        group1 = self.identity_api.create_group(group1)
        project1 = unit.new_project_ref(domain_id=domain2['id'])
        self.resource_api.create_project(project1['id'], project1)

        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            project_id=project1['id'])
        self.assertEqual(0, len(roles_ref))

        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=project1['id'],
                                         role_id=role1['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=project1['id'],
                                         role_id=role2['id'])
        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            project_id=project1['id'])

        roles_ref_ids = []
        for ref in roles_ref:
            roles_ref_ids.append(ref['id'])
        self.assertIn(role1['id'], roles_ref_ids)
        self.assertIn(role2['id'], roles_ref_ids)

        self.assignment_api.delete_grant(group_id=group1['id'],
                                         project_id=project1['id'],
                                         role_id=role1['id'])
        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            project_id=project1['id'])
        self.assertEqual(1, len(roles_ref))
        self.assertDictEqual(role2, roles_ref[0])

    def test_role_grant_by_user_and_cross_domain_project(self):
        """User in one domain can hold roles on a project in another."""
        role1 = unit.new_role_ref()
        self.role_api.create_role(role1['id'], role1)
        role2 = unit.new_role_ref()
        self.role_api.create_role(role2['id'], role2)
        domain1 = unit.new_domain_ref()
        self.resource_api.create_domain(domain1['id'], domain1)
        domain2 = unit.new_domain_ref()
        self.resource_api.create_domain(domain2['id'], domain2)
        user1 = unit.new_user_ref(domain_id=domain1['id'])
        user1 = self.identity_api.create_user(user1)
        project1 = unit.new_project_ref(domain_id=domain2['id'])
        self.resource_api.create_project(project1['id'], project1)

        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            project_id=project1['id'])
        self.assertEqual(0, len(roles_ref))

        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=project1['id'],
                                         role_id=role1['id'])
        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=project1['id'],
                                         role_id=role2['id'])
        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            project_id=project1['id'])

        roles_ref_ids = []
        for ref in roles_ref:
            roles_ref_ids.append(ref['id'])
        self.assertIn(role1['id'], roles_ref_ids)
        self.assertIn(role2['id'], roles_ref_ids)

        self.assignment_api.delete_grant(user_id=user1['id'],
                                         project_id=project1['id'],
                                         role_id=role1['id'])
        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            project_id=project1['id'])
        self.assertEqual(1, len(roles_ref))
        self.assertDictEqual(role2, roles_ref[0])

    def test_delete_user_grant_no_user(self):
        # Can delete a grant where the user doesn't exist.
        role = unit.new_role_ref()
        role_id = role['id']
        self.role_api.create_role(role_id, role)
        user_id = uuid.uuid4().hex

        self.assignment_api.create_grant(role_id,
                                         user_id=user_id,
                                         project_id=self.tenant_bar['id'])
        self.assignment_api.delete_grant(role_id,
                                         user_id=user_id,
                                         project_id=self.tenant_bar['id'])

    def test_delete_group_grant_no_group(self):
        # Can delete a grant where the group doesn't exist.
        role = unit.new_role_ref()
        role_id = role['id']
        self.role_api.create_role(role_id, role)
        group_id = uuid.uuid4().hex

        self.assignment_api.create_grant(role_id,
                                         group_id=group_id,
                                         project_id=self.tenant_bar['id'])
        self.assignment_api.delete_grant(role_id,
                                         group_id=group_id,
                                         project_id=self.tenant_bar['id'])

    def test_grant_crud_throws_exception_if_invalid_role(self):
        """Ensure RoleNotFound thrown if role does not exist."""
        def assert_role_not_found_exception(f, **kwargs):
            # Helper: call manager method f with a random role_id and
            # expect RoleNotFound
            self.assertRaises(exception.RoleNotFound, f,
                              role_id=uuid.uuid4().hex, **kwargs)

        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user_resp = self.identity_api.create_user(user)
        group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        group_resp = self.identity_api.create_group(group)
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id)
        project_resp = self.resource_api.create_project(project['id'], project)

        # Exercise create/get/delete grant for every actor/target combination
        for manager_call in [self.assignment_api.create_grant,
                             self.assignment_api.get_grant,
                             self.assignment_api.delete_grant]:
            assert_role_not_found_exception(
                manager_call,
                user_id=user_resp['id'], project_id=project_resp['id'])
            assert_role_not_found_exception(
                manager_call,
                group_id=group_resp['id'], project_id=project_resp['id'])
            assert_role_not_found_exception(
                manager_call,
                user_id=user_resp['id'],
                domain_id=CONF.identity.default_domain_id)
            assert_role_not_found_exception(
                manager_call,
                group_id=group_resp['id'],
                domain_id=CONF.identity.default_domain_id)

    def test_multi_role_grant_by_user_group_on_project_domain(self):
        """User and group grants on project and domain stay separated."""
        role_list = []
        for _ in range(10):
            role = unit.new_role_ref()
            self.role_api.create_role(role['id'], role)
            role_list.append(role)
        domain1 = unit.new_domain_ref()
        self.resource_api.create_domain(domain1['id'], domain1)
        user1 = unit.new_user_ref(domain_id=domain1['id'])
        user1 = self.identity_api.create_user(user1)
        group1 = unit.new_group_ref(domain_id=domain1['id'])
        group1 = self.identity_api.create_group(group1)
        group2 = unit.new_group_ref(domain_id=domain1['id'])
        group2 = self.identity_api.create_group(group2)
        project1 = unit.new_project_ref(domain_id=domain1['id'])
        self.resource_api.create_project(project1['id'], project1)

        self.identity_api.add_user_to_group(user1['id'],
                                            group1['id'])
        self.identity_api.add_user_to_group(user1['id'],
                                            group2['id'])

        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            project_id=project1['id'])
        self.assertEqual(0, len(roles_ref))
        self.assignment_api.create_grant(user_id=user1['id'],
                                         domain_id=domain1['id'],
                                         role_id=role_list[0]['id'])
        self.assignment_api.create_grant(user_id=user1['id'],
                                         domain_id=domain1['id'],
                                         role_id=role_list[1]['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         domain_id=domain1['id'],
                                         role_id=role_list[2]['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         domain_id=domain1['id'],
                                         role_id=role_list[3]['id'])
        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=project1['id'],
                                         role_id=role_list[4]['id'])
        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=project1['id'],
                                         role_id=role_list[5]['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=project1['id'],
                                         role_id=role_list[6]['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=project1['id'],
                                         role_id=role_list[7]['id'])

        # Each of the four actor/target combinations gets back only
        # its own two grants
        roles_ref = self.assignment_api.list_grants(user_id=user1['id'],
                                                    domain_id=domain1['id'])
        self.assertEqual(2, len(roles_ref))
        self.assertIn(role_list[0], roles_ref)
        self.assertIn(role_list[1], roles_ref)
        roles_ref = self.assignment_api.list_grants(group_id=group1['id'],
                                                    domain_id=domain1['id'])
        self.assertEqual(2, len(roles_ref))
        self.assertIn(role_list[2], roles_ref)
        self.assertIn(role_list[3], roles_ref)
        roles_ref = self.assignment_api.list_grants(user_id=user1['id'],
                                                    project_id=project1['id'])
        self.assertEqual(2, len(roles_ref))
        self.assertIn(role_list[4], roles_ref)
        self.assertIn(role_list[5], roles_ref)
        roles_ref = self.assignment_api.list_grants(group_id=group1['id'],
                                                    project_id=project1['id'])
        self.assertEqual(2, len(roles_ref))
        self.assertIn(role_list[6], roles_ref)
        self.assertIn(role_list[7], roles_ref)

        # Now test the alternate way of getting back lists of grants,
        # where user and group roles are combined.  These should match
        # the above results.
        combined_list = self.assignment_api.get_roles_for_user_and_project(
            user1['id'], project1['id'])
        self.assertEqual(4, len(combined_list))
        self.assertIn(role_list[4]['id'], combined_list)
        self.assertIn(role_list[5]['id'], combined_list)
        self.assertIn(role_list[6]['id'], combined_list)
        self.assertIn(role_list[7]['id'], combined_list)

        combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
            user1['id'], domain1['id'])
        self.assertEqual(4, len(combined_role_list))
        self.assertIn(role_list[0]['id'], combined_role_list)
        self.assertIn(role_list[1]['id'], combined_role_list)
        self.assertIn(role_list[2]['id'], combined_role_list)
        self.assertIn(role_list[3]['id'], combined_role_list)

    def test_multi_group_grants_on_project_domain(self):
        """Test multiple group roles for user on project and domain.

        Test Plan:

        - Create 6 roles
        - Create a domain, with a project, user and two groups
        - Make the user a member of both groups
        - Check no roles yet exit
        - Assign a role to each user and both groups on both the
          project and domain
        - Get a list of effective roles for the user on both the
          project and domain, checking we get back the correct three
          roles

        """
        role_list = []
        for _ in range(6):
            role = unit.new_role_ref()
            self.role_api.create_role(role['id'], role)
            role_list.append(role)
        domain1 = unit.new_domain_ref()
        self.resource_api.create_domain(domain1['id'], domain1)
        user1 = unit.new_user_ref(domain_id=domain1['id'])
        user1 = self.identity_api.create_user(user1)
        group1 = unit.new_group_ref(domain_id=domain1['id'])
        group1 = self.identity_api.create_group(group1)
        group2 = unit.new_group_ref(domain_id=domain1['id'])
        group2 = self.identity_api.create_group(group2)
        project1 = unit.new_project_ref(domain_id=domain1['id'])
        self.resource_api.create_project(project1['id'], project1)

        self.identity_api.add_user_to_group(user1['id'],
                                            group1['id'])
        self.identity_api.add_user_to_group(user1['id'],
                                            group2['id'])

        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            project_id=project1['id'])
        self.assertEqual(0, len(roles_ref))
        self.assignment_api.create_grant(user_id=user1['id'],
                                         domain_id=domain1['id'],
                                         role_id=role_list[0]['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         domain_id=domain1['id'],
                                         role_id=role_list[1]['id'])
        self.assignment_api.create_grant(group_id=group2['id'],
                                         domain_id=domain1['id'],
                                         role_id=role_list[2]['id'])
        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=project1['id'],
                                         role_id=role_list[3]['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=project1['id'],
                                         role_id=role_list[4]['id'])
        self.assignment_api.create_grant(group_id=group2['id'],
                                         project_id=project1['id'],
                                         role_id=role_list[5]['id'])

        # Read by the roles, ensuring we get the correct 3 roles for
        # both project and domain
        combined_list = self.assignment_api.get_roles_for_user_and_project(
            user1['id'], project1['id'])
        self.assertEqual(3, len(combined_list))
        self.assertIn(role_list[3]['id'], combined_list)
        self.assertIn(role_list[4]['id'], combined_list)
        self.assertIn(role_list[5]['id'], combined_list)

        combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
            user1['id'], domain1['id'])
        self.assertEqual(3, len(combined_role_list))
        self.assertIn(role_list[0]['id'], combined_role_list)
        self.assertIn(role_list[1]['id'], combined_role_list)
        self.assertIn(role_list[2]['id'], combined_role_list)

    def test_delete_role_with_user_and_group_grants(self):
        """Deleting a role removes every grant that used it."""
        role1 = unit.new_role_ref()
        self.role_api.create_role(role1['id'], role1)
        domain1 = unit.new_domain_ref()
        self.resource_api.create_domain(domain1['id'], domain1)
        project1 = unit.new_project_ref(domain_id=domain1['id'])
        self.resource_api.create_project(project1['id'], project1)
        user1 = unit.new_user_ref(domain_id=domain1['id'])
        user1 = self.identity_api.create_user(user1)
        group1 = unit.new_group_ref(domain_id=domain1['id'])
        group1 = self.identity_api.create_group(group1)
        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=project1['id'],
                                         role_id=role1['id'])
        self.assignment_api.create_grant(user_id=user1['id'],
                                         domain_id=domain1['id'],
                                         role_id=role1['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=project1['id'],
                                         role_id=role1['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         domain_id=domain1['id'],
                                         role_id=role1['id'])
        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            project_id=project1['id'])
        self.assertEqual(1, len(roles_ref))
        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            project_id=project1['id'])
        self.assertEqual(1, len(roles_ref))
        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            domain_id=domain1['id'])
        self.assertEqual(1, len(roles_ref))
        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            domain_id=domain1['id'])
        self.assertEqual(1, len(roles_ref))

        # Deleting the role must cascade to all four grants
        self.role_api.delete_role(role1['id'])
        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            project_id=project1['id'])
        self.assertEqual(0, len(roles_ref))
        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            project_id=project1['id'])
        self.assertEqual(0, len(roles_ref))
        roles_ref = self.assignment_api.list_grants(
            user_id=user1['id'],
            domain_id=domain1['id'])
        self.assertEqual(0, len(roles_ref))
        roles_ref = self.assignment_api.list_grants(
            group_id=group1['id'],
            domain_id=domain1['id'])
        self.assertEqual(0, len(roles_ref))

    def test_list_role_assignment_by_domain(self):
        """Test listing of role assignment filtered by domain."""
        test_plan = {
            # A domain with 3 users, 1 group, a spoiler domain and 2 roles.
            'entities': {'domains': [{'users': 3, 'groups': 1}, 1],
                         'roles': 2},
            # Users 1 & 2 are in the group
            'group_memberships': [{'group': 0, 'users': [1, 2]}],
            # Assign a role for user 0 and the group
            'assignments': [{'user': 0, 'role': 0, 'domain': 0},
                            {'group': 0, 'role': 1, 'domain': 0}],
            'tests': [
                # List all effective assignments for domain[0].
                # Should get one direct user role and user roles for each of
                # the users in the group.
                {'params': {'domain': 0, 'effective': True},
                 'results': [{'user': 0, 'role': 0, 'domain': 0},
                             {'user': 1, 'role': 1, 'domain': 0,
                              'indirect': {'group': 0}},
                             {'user': 2, 'role': 1, 'domain': 0,
                              'indirect': {'group': 0}}
                             ]},
                # Using domain[1] should return nothing
                {'params': {'domain': 1, 'effective': True},
                 'results': []},
            ]
        }
        self.execute_assignment_plan(test_plan)

    def test_list_role_assignment_by_user_with_domain_group_roles(self):
        """Test listing assignments by user, with group roles on a domain."""
        test_plan = {
            # A domain with 3 users, 3 groups, a spoiler domain
            # plus 3 roles.
            'entities': {'domains': [{'users': 3, 'groups': 3}, 1],
                         'roles': 3},
            # Users 1 & 2 are in the group 0, User 1 also in group 1
            'group_memberships': [{'group': 0, 'users': [0, 1]},
                                  {'group': 1, 'users': [0]}],
            'assignments': [{'user': 0, 'role': 0, 'domain': 0},
                            {'group': 0, 'role': 1, 'domain': 0},
                            {'group': 1, 'role': 2, 'domain': 0},
                            # ...and two spoiler assignments
                            {'user': 1, 'role': 1, 'domain': 0},
                            {'group': 2, 'role': 2, 'domain': 0}],
            'tests': [
                # List all effective assignments for user[0].
                # Should get one direct user role and a user roles for each of
                # groups 0 and 1
                {'params': {'user': 0, 'effective': True},
                 'results': [{'user': 0, 'role': 0, 'domain': 0},
                             {'user': 0, 'role': 1, 'domain': 0,
                              'indirect': {'group': 0}},
                             {'user': 0, 'role': 2, 'domain': 0,
                              'indirect': {'group': 1}}
                             ]},
                # Adding domain[0] as a filter should return the same data
                {'params': {'user': 0, 'domain': 0, 'effective': True},
                 'results': [{'user': 0, 'role': 0, 'domain': 0},
                             {'user': 0, 'role': 1, 'domain': 0,
                              'indirect': {'group': 0}},
                             {'user': 0, 'role': 2, 'domain': 0,
                              'indirect': {'group': 1}}
                             ]},
                # Using domain[1] should return nothing
                {'params': {'user': 0, 'domain': 1, 'effective': True},
                 'results': []},
                # Using user[2] should return nothing
                {'params': {'user': 2, 'domain': 0, 'effective': True},
                 'results': []},
            ]
        }
        self.execute_assignment_plan(test_plan)

    def test_list_role_assignment_using_sourced_groups(self):
        """Test listing assignments when restricted by source groups."""
        test_plan = {
            # The default domain with 3 users, 3 groups, 3 projects,
            # plus 3 roles.
            'entities': {'domains': {'id': CONF.identity.default_domain_id,
                                     'users': 3, 'groups': 3, 'projects': 3},
                         'roles': 3},
            # Users 0 & 1 are in the group 0, User 0 also in group 1
            'group_memberships': [{'group': 0, 'users': [0, 1]},
                                  {'group': 1, 'users': [0]}],
            # Spread the assignments around - we want to be able to show that
            # if sourced by group, assignments from other sources are excluded
            'assignments': [{'user': 0, 'role': 0, 'project': 0},
                            {'group': 0, 'role': 1, 'project': 1},
                            {'group': 1, 'role': 2, 'project': 0},
                            {'group': 1, 'role': 2, 'project': 1},
                            {'user': 2, 'role': 1, 'project': 1},
                            {'group': 2, 'role': 2, 'project': 2}
                            ],
            'tests': [
                # List all effective assignments sourced from groups 0 and 1
                {'params': {'source_from_group_ids': [0, 1],
                            'effective': True},
                 'results': [{'group': 0, 'role': 1, 'project': 1},
                             {'group': 1, 'role': 2, 'project': 0},
                             {'group': 1, 'role': 2, 'project': 1}
                             ]},
                # Adding a role a filter should further restrict the entries
                {'params': {'source_from_group_ids': [0, 1], 'role': 2,
                            'effective': True},
                 'results': [{'group': 1, 'role': 2, 'project': 0},
                             {'group': 1, 'role': 2, 'project': 1}
                             ]},
            ]
        }
        self.execute_assignment_plan(test_plan)

    def test_list_role_assignment_using_sourced_groups_with_domains(self):
        """Test listing domain assignments when restricted by source groups."""
        test_plan = {
            # A domain with 3 users, 3 groups, 3 projects, a second domain,
            # plus 3 roles.
            'entities': {'domains': [{'users': 3, 'groups': 3, 'projects': 3},
                                     1],
                         'roles': 3},
            # Users 0 & 1 are in the group 0, User 0 also in group 1
            'group_memberships': [{'group': 0, 'users': [0, 1]},
                                  {'group': 1, 'users': [0]}],
            # Spread the assignments around - we want to be able to show that
            # if sourced by group, assignments from other sources are excluded
            'assignments': [{'user': 0, 'role': 0, 'domain': 0},
                            {'group': 0, 'role': 1, 'domain': 1},
                            {'group': 1, 'role': 2, 'project': 0},
                            {'group': 1, 'role': 2, 'project': 1},
                            {'user': 2, 'role': 1, 'project': 1},
                            {'group': 2, 'role': 2, 'project': 2}
                            ],
            'tests': [
                # List all effective assignments sourced from groups 0 and 1
                {'params': {'source_from_group_ids': [0, 1],
                            'effective': True},
                 'results': [{'group': 0, 'role': 1, 'domain': 1},
                             {'group': 1, 'role': 2, 'project': 0},
                             {'group': 1, 'role': 2, 'project': 1}
                             ]},
                # Adding a role a filter should further restrict the entries
                {'params': {'source_from_group_ids': [0, 1], 'role': 1,
                            'effective': True},
                 'results': [{'group': 0, 'role': 1, 'domain': 1},
                             ]},
            ]
        }
        self.execute_assignment_plan(test_plan)

    def test_list_role_assignment_fails_with_userid_and_source_groups(self):
        """Show we trap this unsupported internal combination of params."""
        group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        group = self.identity_api.create_group(group)
        self.assertRaises(exception.UnexpectedError,
                          self.assignment_api.list_role_assignments,
                          effective=True,
                          user_id=self.user_foo['id'],
                          source_from_group_ids=[group['id']])

    def test_add_user_to_project(self):
        """Added user sees the project in their project list."""
        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
                                                self.user_foo['id'])
        tenants = self.assignment_api.list_projects_for_user(
            self.user_foo['id'])
        self.assertIn(self.tenant_baz, tenants)

    def test_add_user_to_project_missing_default_role(self):
        """Adding a user re-creates the default member role if deleted."""
        self.role_api.delete_role(CONF.member_role_id)
        self.assertRaises(exception.RoleNotFound,
                          self.role_api.get_role,
                          CONF.member_role_id)
        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
                                                self.user_foo['id'])
        tenants = (
            self.assignment_api.list_projects_for_user(self.user_foo['id']))
        self.assertIn(self.tenant_baz, tenants)
        # The default member role must have been re-created
        default_role = self.role_api.get_role(CONF.member_role_id)
        self.assertIsNotNone(default_role)

    def test_add_user_to_project_returns_not_found(self):
        """Adding a user to an unknown project raises ProjectNotFound."""
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.add_user_to_project,
                          uuid.uuid4().hex,
                          self.user_foo['id'])

    def test_add_user_to_project_no_user(self):
        # If add_user_to_project and the user doesn't exist, then
        # no error.
        user_id_not_exist = uuid.uuid4().hex
        self.assignment_api.add_user_to_project(self.tenant_bar['id'],
                                                user_id_not_exist)

    def test_remove_user_from_project(self):
        """Removed user no longer sees the project in their list."""
        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
                                                self.user_foo['id'])
        self.assignment_api.remove_user_from_project(self.tenant_baz['id'],
                                                     self.user_foo['id'])
        tenants = self.assignment_api.list_projects_for_user(
            self.user_foo['id'])
        self.assertNotIn(self.tenant_baz, tenants)

    def test_remove_user_from_project_race_delete_role(self):
        """Removal succeeds even if a role was deleted concurrently."""
        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
                                                self.user_foo['id'])
        self.assignment_api.add_role_to_user_and_project(
            tenant_id=self.tenant_baz['id'],
            user_id=self.user_foo['id'],
            role_id=self.role_other['id'])

        # Mock a race condition, delete a role after
        # get_roles_for_user_and_project() is called in
        # remove_user_from_project().
        roles = self.assignment_api.get_roles_for_user_and_project(
            self.user_foo['id'], self.tenant_baz['id'])
        self.role_api.delete_role(self.role_other['id'])
        self.assignment_api.get_roles_for_user_and_project = mock.Mock(
            return_value=roles)
        self.assignment_api.remove_user_from_project(self.tenant_baz['id'],
                                                     self.user_foo['id'])
        tenants = self.assignment_api.list_projects_for_user(
            self.user_foo['id'])
        self.assertNotIn(self.tenant_baz, tenants)

    def test_remove_user_from_project_returns_not_found(self):
        """Unknown project, user, or membership raises NotFound."""
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.remove_user_from_project,
                          uuid.uuid4().hex,
                          self.user_foo['id'])

        self.assertRaises(exception.UserNotFound,
                          self.assignment_api.remove_user_from_project,
                          self.tenant_bar['id'],
                          uuid.uuid4().hex)

        self.assertRaises(exception.NotFound,
                          self.assignment_api.remove_user_from_project,
                          self.tenant_baz['id'],
                          self.user_foo['id'])

    def test_list_user_project_ids_returns_not_found(self):
        """Listing projects for an unknown user raises UserNotFound."""
        self.assertRaises(exception.UserNotFound,
                          self.assignment_api.list_projects_for_user,
                          uuid.uuid4().hex)

    def test_delete_user_with_project_association(self):
        """Deleting a user invalidates their project membership lookups."""
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = self.identity_api.create_user(user)
        self.assignment_api.add_user_to_project(self.tenant_bar['id'],
                                                user['id'])
        self.identity_api.delete_user(user['id'])
        self.assertRaises(exception.UserNotFound,
                          self.assignment_api.list_projects_for_user,
                          user['id'])

    def test_delete_user_with_project_roles(self):
        """Deleting a user with project roles invalidates their lookups."""
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = self.identity_api.create_user(user)
        self.assignment_api.add_role_to_user_and_project(
            user['id'],
            self.tenant_bar['id'],
            self.role_member['id'])
        self.identity_api.delete_user(user['id'])
        self.assertRaises(exception.UserNotFound,
                          self.assignment_api.list_projects_for_user,
                          user['id'])

    def test_delete_role_returns_not_found(self):
        """Deleting an unknown role raises RoleNotFound."""
        self.assertRaises(exception.RoleNotFound,
                          self.role_api.delete_role,
                          uuid.uuid4().hex)

    def
test_delete_project_with_role_assignments(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project['id'], project) self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], project['id'], 'member') self.resource_api.delete_project(project['id']) self.assertRaises(exception.ProjectNotFound, self.assignment_api.list_user_ids_for_project, project['id']) def test_delete_role_check_role_grant(self): role = unit.new_role_ref() alt_role = unit.new_role_ref() self.role_api.create_role(role['id'], role) self.role_api.create_role(alt_role['id'], alt_role) self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_bar['id'], role['id']) self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_bar['id'], alt_role['id']) self.role_api.delete_role(role['id']) roles_ref = self.assignment_api.get_roles_for_user_and_project( self.user_foo['id'], self.tenant_bar['id']) self.assertNotIn(role['id'], roles_ref) self.assertIn(alt_role['id'], roles_ref) def test_list_projects_for_user(self): domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user1 = unit.new_user_ref(domain_id=domain['id']) user1 = self.identity_api.create_user(user1) user_projects = self.assignment_api.list_projects_for_user(user1['id']) self.assertEqual(0, len(user_projects)) self.assignment_api.create_grant(user_id=user1['id'], project_id=self.tenant_bar['id'], role_id=self.role_member['id']) self.assignment_api.create_grant(user_id=user1['id'], project_id=self.tenant_baz['id'], role_id=self.role_member['id']) user_projects = self.assignment_api.list_projects_for_user(user1['id']) self.assertEqual(2, len(user_projects)) def test_list_projects_for_user_with_grants(self): # Create two groups each with a role on a different project, and # make user1 a member of both groups. Both these new projects # should now be included, along with any direct user grants. 
domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user1 = unit.new_user_ref(domain_id=domain['id']) user1 = self.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = self.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domain['id']) group2 = self.identity_api.create_group(group2) project1 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project2['id'], project2) self.identity_api.add_user_to_group(user1['id'], group1['id']) self.identity_api.add_user_to_group(user1['id'], group2['id']) # Create 3 grants, one user grant, the other two as group grants self.assignment_api.create_grant(user_id=user1['id'], project_id=self.tenant_bar['id'], role_id=self.role_member['id']) self.assignment_api.create_grant(group_id=group1['id'], project_id=project1['id'], role_id=self.role_admin['id']) self.assignment_api.create_grant(group_id=group2['id'], project_id=project2['id'], role_id=self.role_admin['id']) user_projects = self.assignment_api.list_projects_for_user(user1['id']) self.assertEqual(3, len(user_projects)) def test_create_grant_no_user(self): # If call create_grant with a user that doesn't exist, doesn't fail. self.assignment_api.create_grant( self.role_other['id'], user_id=uuid.uuid4().hex, project_id=self.tenant_bar['id']) def test_create_grant_no_group(self): # If call create_grant with a group that doesn't exist, doesn't fail. self.assignment_api.create_grant( self.role_other['id'], group_id=uuid.uuid4().hex, project_id=self.tenant_bar['id']) def test_delete_group_removes_role_assignments(self): # When a group is deleted any role assignments for the group are # removed. 
MEMBER_ROLE_ID = 'member' def get_member_assignments(): assignments = self.assignment_api.list_role_assignments() return [x for x in assignments if x['role_id'] == MEMBER_ROLE_ID] orig_member_assignments = get_member_assignments() # Create a group. new_group = unit.new_group_ref( domain_id=CONF.identity.default_domain_id) new_group = self.identity_api.create_group(new_group) # Create a project. new_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(new_project['id'], new_project) # Assign a role to the group. self.assignment_api.create_grant( group_id=new_group['id'], project_id=new_project['id'], role_id=MEMBER_ROLE_ID) # Delete the group. self.identity_api.delete_group(new_group['id']) # Check that the role assignment for the group is gone member_assignments = get_member_assignments() self.assertThat(member_assignments, matchers.Equals(orig_member_assignments)) def test_get_roles_for_groups_on_domain(self): """Test retrieving group domain roles. 
Test Plan: - Create a domain, three groups and three roles - Assign one an inherited and the others a non-inherited group role to the domain - Ensure that only the non-inherited roles are returned on the domain """ domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) group_list = [] group_id_list = [] role_list = [] for _ in range(3): group = unit.new_group_ref(domain_id=domain1['id']) group = self.identity_api.create_group(group) group_list.append(group) group_id_list.append(group['id']) role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) # Assign the roles - one is inherited self.assignment_api.create_grant(group_id=group_list[0]['id'], domain_id=domain1['id'], role_id=role_list[0]['id']) self.assignment_api.create_grant(group_id=group_list[1]['id'], domain_id=domain1['id'], role_id=role_list[1]['id']) self.assignment_api.create_grant(group_id=group_list[2]['id'], domain_id=domain1['id'], role_id=role_list[2]['id'], inherited_to_projects=True) # Now get the effective roles for the groups on the domain project. We # shouldn't get back the inherited role. role_refs = self.assignment_api.get_roles_for_groups( group_id_list, domain_id=domain1['id']) self.assertThat(role_refs, matchers.HasLength(2)) self.assertIn(role_list[0], role_refs) self.assertIn(role_list[1], role_refs) def test_get_roles_for_groups_on_project(self): """Test retrieving group project roles. 
Test Plan: - Create two domains, two projects, six groups and six roles - Project1 is in Domain1, Project2 is in Domain2 - Domain2/Project2 are spoilers - Assign a different direct group role to each project as well as both an inherited and non-inherited role to each domain - Get the group roles for Project 1 - depending on whether we have enabled inheritance, we should either get back just the direct role or both the direct one plus the inherited domain role from Domain 1 """ domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) project1 = unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain2['id']) self.resource_api.create_project(project2['id'], project2) group_list = [] group_id_list = [] role_list = [] for _ in range(6): group = unit.new_group_ref(domain_id=domain1['id']) group = self.identity_api.create_group(group) group_list.append(group) group_id_list.append(group['id']) role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) # Assign the roles - one inherited and one non-inherited on Domain1, # plus one on Project1 self.assignment_api.create_grant(group_id=group_list[0]['id'], domain_id=domain1['id'], role_id=role_list[0]['id']) self.assignment_api.create_grant(group_id=group_list[1]['id'], domain_id=domain1['id'], role_id=role_list[1]['id'], inherited_to_projects=True) self.assignment_api.create_grant(group_id=group_list[2]['id'], project_id=project1['id'], role_id=role_list[2]['id']) # ...and a duplicate set of spoiler assignments to Domain2/Project2 self.assignment_api.create_grant(group_id=group_list[3]['id'], domain_id=domain2['id'], role_id=role_list[3]['id']) self.assignment_api.create_grant(group_id=group_list[4]['id'], domain_id=domain2['id'], role_id=role_list[4]['id'], 
inherited_to_projects=True) self.assignment_api.create_grant(group_id=group_list[5]['id'], project_id=project2['id'], role_id=role_list[5]['id']) # Now get the effective roles for all groups on the Project1. With # inheritance off, we should only get back the direct role. self.config_fixture.config(group='os_inherit', enabled=False) role_refs = self.assignment_api.get_roles_for_groups( group_id_list, project_id=project1['id']) self.assertThat(role_refs, matchers.HasLength(1)) self.assertIn(role_list[2], role_refs) # With inheritance on, we should also get back the inherited role from # its owning domain. self.config_fixture.config(group='os_inherit', enabled=True) role_refs = self.assignment_api.get_roles_for_groups( group_id_list, project_id=project1['id']) self.assertThat(role_refs, matchers.HasLength(2)) self.assertIn(role_list[1], role_refs) self.assertIn(role_list[2], role_refs) def test_list_domains_for_groups(self): """Test retrieving domains for a list of groups. Test Plan: - Create three domains, three groups and one role - Assign a non-inherited group role to two domains, and an inherited group role to the third - Ensure only the domains with non-inherited roles are returned """ domain_list = [] group_list = [] group_id_list = [] for _ in range(3): domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) domain_list.append(domain) group = unit.new_group_ref(domain_id=domain['id']) group = self.identity_api.create_group(group) group_list.append(group) group_id_list.append(group['id']) role1 = unit.new_role_ref() self.role_api.create_role(role1['id'], role1) # Assign the roles - one is inherited self.assignment_api.create_grant(group_id=group_list[0]['id'], domain_id=domain_list[0]['id'], role_id=role1['id']) self.assignment_api.create_grant(group_id=group_list[1]['id'], domain_id=domain_list[1]['id'], role_id=role1['id']) self.assignment_api.create_grant(group_id=group_list[2]['id'], domain_id=domain_list[2]['id'], 
role_id=role1['id'], inherited_to_projects=True) # Now list the domains that have roles for any of the 3 groups # We shouldn't get back domain[2] since that had an inherited role. domain_refs = ( self.assignment_api.list_domains_for_groups(group_id_list)) self.assertThat(domain_refs, matchers.HasLength(2)) self.assertIn(domain_list[0], domain_refs) self.assertIn(domain_list[1], domain_refs) def test_list_projects_for_groups(self): """Test retrieving projects for a list of groups. Test Plan: - Create two domains, four projects, seven groups and seven roles - Project1-3 are in Domain1, Project4 is in Domain2 - Domain2/Project4 are spoilers - Project1 and 2 have direct group roles, Project3 has no direct roles but should inherit a group role from Domain1 - Get the projects for the group roles that are assigned to Project1 Project2 and the inherited one on Domain1. Depending on whether we have enabled inheritance, we should either get back just the projects with direct roles (Project 1 and 2) or also Project3 due to its inherited role from Domain1. 
""" domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) project1 = unit.new_project_ref(domain_id=domain1['id']) project1 = self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain1['id']) project2 = self.resource_api.create_project(project2['id'], project2) project3 = unit.new_project_ref(domain_id=domain1['id']) project3 = self.resource_api.create_project(project3['id'], project3) project4 = unit.new_project_ref(domain_id=domain2['id']) project4 = self.resource_api.create_project(project4['id'], project4) group_list = [] role_list = [] for _ in range(7): group = unit.new_group_ref(domain_id=domain1['id']) group = self.identity_api.create_group(group) group_list.append(group) role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) # Assign the roles - one inherited and one non-inherited on Domain1, # plus one on Project1 and Project2 self.assignment_api.create_grant(group_id=group_list[0]['id'], domain_id=domain1['id'], role_id=role_list[0]['id']) self.assignment_api.create_grant(group_id=group_list[1]['id'], domain_id=domain1['id'], role_id=role_list[1]['id'], inherited_to_projects=True) self.assignment_api.create_grant(group_id=group_list[2]['id'], project_id=project1['id'], role_id=role_list[2]['id']) self.assignment_api.create_grant(group_id=group_list[3]['id'], project_id=project2['id'], role_id=role_list[3]['id']) # ...and a few of spoiler assignments to Domain2/Project4 self.assignment_api.create_grant(group_id=group_list[4]['id'], domain_id=domain2['id'], role_id=role_list[4]['id']) self.assignment_api.create_grant(group_id=group_list[5]['id'], domain_id=domain2['id'], role_id=role_list[5]['id'], inherited_to_projects=True) self.assignment_api.create_grant(group_id=group_list[6]['id'], project_id=project4['id'], role_id=role_list[6]['id']) # Now 
get the projects for the groups that have roles on Project1, # Project2 and the inherited role on Domain!. With inheritance off, # we should only get back the projects with direct role. self.config_fixture.config(group='os_inherit', enabled=False) group_id_list = [group_list[1]['id'], group_list[2]['id'], group_list[3]['id']] project_refs = ( self.assignment_api.list_projects_for_groups(group_id_list)) self.assertThat(project_refs, matchers.HasLength(2)) self.assertIn(project1, project_refs) self.assertIn(project2, project_refs) # With inheritance on, we should also get back the Project3 due to the # inherited role from its owning domain. self.config_fixture.config(group='os_inherit', enabled=True) project_refs = ( self.assignment_api.list_projects_for_groups(group_id_list)) self.assertThat(project_refs, matchers.HasLength(3)) self.assertIn(project1, project_refs) self.assertIn(project2, project_refs) self.assertIn(project3, project_refs) def test_update_role_no_name(self): # A user can update a role and not include the name. # description is picked just because it's not name. self.role_api.update_role(self.role_member['id'], {'description': uuid.uuid4().hex}) # If the previous line didn't raise an exception then the test passes. def test_update_role_same_name(self): # A user can update a role and set the name to be the same as it was. self.role_api.update_role(self.role_member['id'], {'name': self.role_member['name']}) # If the previous line didn't raise an exception then the test passes. 
def test_list_role_assignment_containing_names(self): # Create Refs new_role = unit.new_role_ref() new_domain = self._get_domain_fixture() new_user = unit.new_user_ref(domain_id=new_domain['id']) new_project = unit.new_project_ref(domain_id=new_domain['id']) new_group = unit.new_group_ref(domain_id=new_domain['id']) # Create entities new_role = self.role_api.create_role(new_role['id'], new_role) new_user = self.identity_api.create_user(new_user) new_group = self.identity_api.create_group(new_group) self.resource_api.create_project(new_project['id'], new_project) self.assignment_api.create_grant(user_id=new_user['id'], project_id=new_project['id'], role_id=new_role['id']) self.assignment_api.create_grant(group_id=new_group['id'], project_id=new_project['id'], role_id=new_role['id']) self.assignment_api.create_grant(domain_id=new_domain['id'], user_id=new_user['id'], role_id=new_role['id']) # Get the created assignments with the include_names flag _asgmt_prj = self.assignment_api.list_role_assignments( user_id=new_user['id'], project_id=new_project['id'], include_names=True) _asgmt_grp = self.assignment_api.list_role_assignments( group_id=new_group['id'], project_id=new_project['id'], include_names=True) _asgmt_dmn = self.assignment_api.list_role_assignments( domain_id=new_domain['id'], user_id=new_user['id'], include_names=True) # Make sure we can get back the correct number of assignments self.assertThat(_asgmt_prj, matchers.HasLength(1)) self.assertThat(_asgmt_grp, matchers.HasLength(1)) self.assertThat(_asgmt_dmn, matchers.HasLength(1)) # get the first assignment first_asgmt_prj = _asgmt_prj[0] first_asgmt_grp = _asgmt_grp[0] first_asgmt_dmn = _asgmt_dmn[0] # Assert the names are correct in the project response self.assertEqual(new_project['name'], first_asgmt_prj['project_name']) self.assertEqual(new_project['domain_id'], first_asgmt_prj['project_domain_id']) self.assertEqual(new_user['name'], first_asgmt_prj['user_name']) self.assertEqual(new_user['domain_id'], 
first_asgmt_prj['user_domain_id']) self.assertEqual(new_role['name'], first_asgmt_prj['role_name']) # Assert the names are correct in the group response self.assertEqual(new_group['name'], first_asgmt_grp['group_name']) self.assertEqual(new_group['domain_id'], first_asgmt_grp['group_domain_id']) self.assertEqual(new_project['name'], first_asgmt_grp['project_name']) self.assertEqual(new_project['domain_id'], first_asgmt_grp['project_domain_id']) self.assertEqual(new_role['name'], first_asgmt_grp['role_name']) # Assert the names are correct in the domain response self.assertEqual(new_domain['name'], first_asgmt_dmn['domain_name']) self.assertEqual(new_user['name'], first_asgmt_dmn['user_name']) self.assertEqual(new_user['domain_id'], first_asgmt_dmn['user_domain_id']) self.assertEqual(new_role['name'], first_asgmt_dmn['role_name']) def test_list_role_assignment_does_not_contain_names(self): """Test names are not included with list role assignments. Scenario: - names are NOT included by default - names are NOT included when include_names=False """ def assert_does_not_contain_names(assignment): first_asgmt_prj = assignment[0] self.assertNotIn('project_name', first_asgmt_prj) self.assertNotIn('project_domain_id', first_asgmt_prj) self.assertNotIn('user_name', first_asgmt_prj) self.assertNotIn('user_domain_id', first_asgmt_prj) self.assertNotIn('role_name', first_asgmt_prj) # Create Refs new_role = unit.new_role_ref() new_domain = self._get_domain_fixture() new_user = unit.new_user_ref(domain_id=new_domain['id']) new_project = unit.new_project_ref(domain_id=new_domain['id']) # Create entities new_role = self.role_api.create_role(new_role['id'], new_role) new_user = self.identity_api.create_user(new_user) self.resource_api.create_project(new_project['id'], new_project) self.assignment_api.create_grant(user_id=new_user['id'], project_id=new_project['id'], role_id=new_role['id']) # Get the created assignments with NO include_names flag role_assign_without_names = 
self.assignment_api.list_role_assignments( user_id=new_user['id'], project_id=new_project['id']) assert_does_not_contain_names(role_assign_without_names) # Get the created assignments with include_names=False role_assign_without_names = self.assignment_api.list_role_assignments( user_id=new_user['id'], project_id=new_project['id'], include_names=False) assert_does_not_contain_names(role_assign_without_names) def test_delete_user_assignments_user_same_id_as_group(self): """Test deleting user assignments when user_id == group_id. In this scenario, only user assignments must be deleted (i.e. USER_DOMAIN or USER_PROJECT). Test plan: * Create a user and a group with the same ID; * Create four roles and assign them to both user and group; * Delete all user assignments; * Group assignments must stay intact. """ # Create a common ID common_id = uuid.uuid4().hex # Create a project project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project = self.resource_api.create_project(project['id'], project) # Create a user user = unit.new_user_ref(id=common_id, domain_id=CONF.identity.default_domain_id) user = self.identity_api.driver.create_user(common_id, user) self.assertEqual(common_id, user['id']) # Create a group group = unit.new_group_ref(id=common_id, domain_id=CONF.identity.default_domain_id) group = self.identity_api.driver.create_group(common_id, group) self.assertEqual(common_id, group['id']) # Create four roles roles = [] for _ in range(4): role = unit.new_role_ref() roles.append(self.role_api.create_role(role['id'], role)) # Assign roles for user self.assignment_api.driver.create_grant( user_id=user['id'], domain_id=CONF.identity.default_domain_id, role_id=roles[0]['id']) self.assignment_api.driver.create_grant(user_id=user['id'], project_id=project['id'], role_id=roles[1]['id']) # Assign roles for group self.assignment_api.driver.create_grant( group_id=group['id'], domain_id=CONF.identity.default_domain_id, role_id=roles[2]['id']) 
self.assignment_api.driver.create_grant(group_id=group['id'], project_id=project['id'], role_id=roles[3]['id']) # Make sure they were assigned user_assignments = self.assignment_api.list_role_assignments( user_id=user['id']) self.assertThat(user_assignments, matchers.HasLength(2)) group_assignments = self.assignment_api.list_role_assignments( group_id=group['id']) self.assertThat(group_assignments, matchers.HasLength(2)) # Delete user assignments self.assignment_api.delete_user_assignments(user_id=user['id']) # Assert only user assignments were deleted user_assignments = self.assignment_api.list_role_assignments( user_id=user['id']) self.assertThat(user_assignments, matchers.HasLength(0)) group_assignments = self.assignment_api.list_role_assignments( group_id=group['id']) self.assertThat(group_assignments, matchers.HasLength(2)) # Make sure these remaining assignments are group-related for assignment in group_assignments: self.assertThat(assignment.keys(), matchers.Contains('group_id')) def test_delete_group_assignments_group_same_id_as_user(self): """Test deleting group assignments when group_id == user_id. In this scenario, only group assignments must be deleted (i.e. GROUP_DOMAIN or GROUP_PROJECT). Test plan: * Create a group and a user with the same ID; * Create four roles and assign them to both group and user; * Delete all group assignments; * User assignments must stay intact. 
""" # Create a common ID common_id = uuid.uuid4().hex # Create a project project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project = self.resource_api.create_project(project['id'], project) # Create a user user = unit.new_user_ref(id=common_id, domain_id=CONF.identity.default_domain_id) user = self.identity_api.driver.create_user(common_id, user) self.assertEqual(common_id, user['id']) # Create a group group = unit.new_group_ref(id=common_id, domain_id=CONF.identity.default_domain_id) group = self.identity_api.driver.create_group(common_id, group) self.assertEqual(common_id, group['id']) # Create four roles roles = [] for _ in range(4): role = unit.new_role_ref() roles.append(self.role_api.create_role(role['id'], role)) # Assign roles for user self.assignment_api.driver.create_grant( user_id=user['id'], domain_id=CONF.identity.default_domain_id, role_id=roles[0]['id']) self.assignment_api.driver.create_grant(user_id=user['id'], project_id=project['id'], role_id=roles[1]['id']) # Assign roles for group self.assignment_api.driver.create_grant( group_id=group['id'], domain_id=CONF.identity.default_domain_id, role_id=roles[2]['id']) self.assignment_api.driver.create_grant(group_id=group['id'], project_id=project['id'], role_id=roles[3]['id']) # Make sure they were assigned user_assignments = self.assignment_api.list_role_assignments( user_id=user['id']) self.assertThat(user_assignments, matchers.HasLength(2)) group_assignments = self.assignment_api.list_role_assignments( group_id=group['id']) self.assertThat(group_assignments, matchers.HasLength(2)) # Delete group assignments self.assignment_api.delete_group_assignments(group_id=group['id']) # Assert only group assignments were deleted group_assignments = self.assignment_api.list_role_assignments( group_id=group['id']) self.assertThat(group_assignments, matchers.HasLength(0)) user_assignments = self.assignment_api.list_role_assignments( user_id=user['id']) self.assertThat(user_assignments, 
matchers.HasLength(2)) # Make sure these remaining assignments are user-related for assignment in group_assignments: self.assertThat(assignment.keys(), matchers.Contains('user_id')) def test_remove_foreign_assignments_when_deleting_a_domain(self): # A user and a group are in default domain and have assigned a role on # two new domains. This test makes sure that when one of the new # domains is deleted, the role assignments for the user and the group # from the default domain are deleted only on that domain. group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) role = unit.new_role_ref() role = self.role_api.create_role(role['id'], role) new_domains = [unit.new_domain_ref(), unit.new_domain_ref()] for new_domain in new_domains: self.resource_api.create_domain(new_domain['id'], new_domain) self.assignment_api.create_grant(group_id=group['id'], domain_id=new_domain['id'], role_id=role['id']) self.assignment_api.create_grant(user_id=self.user_two['id'], domain_id=new_domain['id'], role_id=role['id']) # Check there are 4 role assignments for that role role_assignments = self.assignment_api.list_role_assignments( role_id=role['id']) self.assertThat(role_assignments, matchers.HasLength(4)) # Delete first new domain and check only 2 assignments were left self.resource_api.update_domain(new_domains[0]['id'], {'enabled': False}) self.resource_api.delete_domain(new_domains[0]['id']) role_assignments = self.assignment_api.list_role_assignments( role_id=role['id']) self.assertThat(role_assignments, matchers.HasLength(2)) # Delete second new domain and check no assignments were left self.resource_api.update_domain(new_domains[1]['id'], {'enabled': False}) self.resource_api.delete_domain(new_domains[1]['id']) role_assignments = self.assignment_api.list_role_assignments( role_id=role['id']) self.assertEqual([], role_assignments) class InheritanceTests(AssignmentTestHelperMixin): def 
test_role_assignments_user_domain_to_project_inheritance(self): test_plan = { 'entities': {'domains': {'users': 2, 'projects': 1}, 'roles': 3}, 'assignments': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, {'user': 0, 'role': 2, 'domain': 0, 'inherited_to_projects': True}, {'user': 1, 'role': 1, 'project': 0}], 'tests': [ # List all direct assignments for user[0] {'params': {'user': 0}, 'results': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, {'user': 0, 'role': 2, 'domain': 0, 'inherited_to_projects': 'projects'}]}, # Now the effective ones - so the domain role should turn into # a project role {'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, {'user': 0, 'role': 2, 'project': 0, 'indirect': {'domain': 0}}]}, # Narrow down to effective roles for user[0] and project[0] {'params': {'user': 0, 'project': 0, 'effective': True}, 'results': [{'user': 0, 'role': 1, 'project': 0}, {'user': 0, 'role': 2, 'project': 0, 'indirect': {'domain': 0}}]} ] } self.config_fixture.config(group='os_inherit', enabled=True) self.execute_assignment_plan(test_plan) def test_inherited_role_assignments_excluded_if_os_inherit_false(self): test_plan = { 'entities': {'domains': {'users': 2, 'groups': 1, 'projects': 1}, 'roles': 4}, 'group_memberships': [{'group': 0, 'users': [0]}], 'assignments': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, {'user': 0, 'role': 2, 'domain': 0, 'inherited_to_projects': True}, {'user': 1, 'role': 1, 'project': 0}, {'group': 0, 'role': 3, 'project': 0}], 'tests': [ # List all direct assignments for user[0], since os-inherit is # disabled, we should not see the inherited role {'params': {'user': 0}, 'results': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}]}, # Same in effective mode - inherited roles should not be # included or expanded...but the group role should now # 
turn up as a user role, since group expansion is not # part of os-inherit. {'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, {'user': 0, 'role': 3, 'project': 0, 'indirect': {'group': 0}}]}, ] } self.config_fixture.config(group='os_inherit', enabled=False) self.execute_assignment_plan(test_plan) def _test_crud_inherited_and_direct_assignment(self, **kwargs): """Tests inherited and direct assignments for the actor and target Ensure it is possible to create both inherited and direct role assignments for the same actor on the same target. The actor and the target are specified in the kwargs as ('user_id' or 'group_id') and ('project_id' or 'domain_id'), respectively. """ self.config_fixture.config(group='os_inherit', enabled=True) # Create a new role to avoid assignments loaded from default fixtures role = unit.new_role_ref() role = self.role_api.create_role(role['id'], role) # Define the common assignment entity assignment_entity = {'role_id': role['id']} assignment_entity.update(kwargs) # Define assignments under test direct_assignment_entity = assignment_entity.copy() inherited_assignment_entity = assignment_entity.copy() inherited_assignment_entity['inherited_to_projects'] = 'projects' # Create direct assignment and check grants self.assignment_api.create_grant(inherited_to_projects=False, **assignment_entity) grants = self.assignment_api.list_role_assignments(role_id=role['id']) self.assertThat(grants, matchers.HasLength(1)) self.assertIn(direct_assignment_entity, grants) # Now add inherited assignment and check grants self.assignment_api.create_grant(inherited_to_projects=True, **assignment_entity) grants = self.assignment_api.list_role_assignments(role_id=role['id']) self.assertThat(grants, matchers.HasLength(2)) self.assertIn(direct_assignment_entity, grants) self.assertIn(inherited_assignment_entity, grants) # Delete both and check grants 
self.assignment_api.delete_grant(inherited_to_projects=False, **assignment_entity) self.assignment_api.delete_grant(inherited_to_projects=True, **assignment_entity) grants = self.assignment_api.list_role_assignments(role_id=role['id']) self.assertEqual([], grants) def test_crud_inherited_and_direct_assignment_for_user_on_domain(self): self._test_crud_inherited_and_direct_assignment( user_id=self.user_foo['id'], domain_id=CONF.identity.default_domain_id) def test_crud_inherited_and_direct_assignment_for_group_on_domain(self): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) self._test_crud_inherited_and_direct_assignment( group_id=group['id'], domain_id=CONF.identity.default_domain_id) def test_crud_inherited_and_direct_assignment_for_user_on_project(self): self._test_crud_inherited_and_direct_assignment( user_id=self.user_foo['id'], project_id=self.tenant_baz['id']) def test_crud_inherited_and_direct_assignment_for_group_on_project(self): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) self._test_crud_inherited_and_direct_assignment( group_id=group['id'], project_id=self.tenant_baz['id']) def test_inherited_role_grants_for_user(self): """Test inherited user roles. 
Test Plan: - Enable OS-INHERIT extension - Create 3 roles - Create a domain, with a project and a user - Check no roles yet exit - Assign a direct user role to the project and a (non-inherited) user role to the domain - Get a list of effective roles - should only get the one direct role - Now add an inherited user role to the domain - Get a list of effective roles - should have two roles, one direct and one by virtue of the inherited user role - Also get effective roles for the domain - the role marked as inherited should not show up """ self.config_fixture.config(group='os_inherit', enabled=True) role_list = [] for _ in range(3): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = self.identity_api.create_user(user1) project1 = unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project1['id'], project1) roles_ref = self.assignment_api.list_grants( user_id=user1['id'], project_id=project1['id']) self.assertEqual(0, len(roles_ref)) # Create the first two roles - the domain one is not inherited self.assignment_api.create_grant(user_id=user1['id'], project_id=project1['id'], role_id=role_list[0]['id']) self.assignment_api.create_grant(user_id=user1['id'], domain_id=domain1['id'], role_id=role_list[1]['id']) # Now get the effective roles for the user and project, this # should only include the direct role assignment on the project combined_list = self.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id']) self.assertEqual(1, len(combined_list)) self.assertIn(role_list[0]['id'], combined_list) # Now add an inherited role on the domain self.assignment_api.create_grant(user_id=user1['id'], domain_id=domain1['id'], role_id=role_list[2]['id'], inherited_to_projects=True) # Now get the effective roles for the user and project again, this # 
should now include the inherited role on the domain combined_list = self.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id']) self.assertEqual(2, len(combined_list)) self.assertIn(role_list[0]['id'], combined_list) self.assertIn(role_list[2]['id'], combined_list) # Finally, check that the inherited role does not appear as a valid # directly assigned role on the domain itself combined_role_list = self.assignment_api.get_roles_for_user_and_domain( user1['id'], domain1['id']) self.assertEqual(1, len(combined_role_list)) self.assertIn(role_list[1]['id'], combined_role_list) # TODO(henry-nash): The test above uses get_roles_for_user_and_project # and get_roles_for_user_and_domain, which will, in a subsequent patch, # be re-implemented to simply call list_role_assignments (see blueprint # remove-role-metadata). # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. Once get_roles_for_user_and # project/domain have been re-implemented then the manual tests above # can be refactored to simply ensure it gives the same answers. test_plan = { # A domain with a user & project, plus 3 roles. 'entities': {'domains': {'users': 1, 'projects': 1}, 'roles': 3}, 'assignments': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 1, 'domain': 0}, {'user': 0, 'role': 2, 'domain': 0, 'inherited_to_projects': True}], 'tests': [ # List all effective assignments for user[0] on project[0]. # Should get one direct role and one inherited role. 
{'params': {'user': 0, 'project': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 2, 'project': 0, 'indirect': {'domain': 0}}]}, # Ensure effective mode on the domain does not list the # inherited role on that domain {'params': {'user': 0, 'domain': 0, 'effective': True}, 'results': [{'user': 0, 'role': 1, 'domain': 0}]}, # Ensure non-inherited mode also only returns the non-inherited # role on the domain {'params': {'user': 0, 'domain': 0, 'inherited': False}, 'results': [{'user': 0, 'role': 1, 'domain': 0}]}, ] } self.execute_assignment_plan(test_plan) def test_inherited_role_grants_for_group(self): """Test inherited group roles. Test Plan: - Enable OS-INHERIT extension - Create 4 roles - Create a domain, with a project, user and two groups - Make the user a member of both groups - Check no roles yet exit - Assign a direct user role to the project and a (non-inherited) group role on the domain - Get a list of effective roles - should only get the one direct role - Now add two inherited group roles to the domain - Get a list of effective roles - should have three roles, one direct and two by virtue of inherited group roles """ self.config_fixture.config(group='os_inherit', enabled=True) role_list = [] for _ in range(4): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = self.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain1['id']) group1 = self.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domain1['id']) group2 = self.identity_api.create_group(group2) project1 = unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project1['id'], project1) self.identity_api.add_user_to_group(user1['id'], group1['id']) self.identity_api.add_user_to_group(user1['id'], 
group2['id']) roles_ref = self.assignment_api.list_grants( user_id=user1['id'], project_id=project1['id']) self.assertEqual(0, len(roles_ref)) # Create two roles - the domain one is not inherited self.assignment_api.create_grant(user_id=user1['id'], project_id=project1['id'], role_id=role_list[0]['id']) self.assignment_api.create_grant(group_id=group1['id'], domain_id=domain1['id'], role_id=role_list[1]['id']) # Now get the effective roles for the user and project, this # should only include the direct role assignment on the project combined_list = self.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id']) self.assertEqual(1, len(combined_list)) self.assertIn(role_list[0]['id'], combined_list) # Now add to more group roles, both inherited, to the domain self.assignment_api.create_grant(group_id=group2['id'], domain_id=domain1['id'], role_id=role_list[2]['id'], inherited_to_projects=True) self.assignment_api.create_grant(group_id=group2['id'], domain_id=domain1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) # Now get the effective roles for the user and project again, this # should now include the inherited roles on the domain combined_list = self.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id']) self.assertEqual(3, len(combined_list)) self.assertIn(role_list[0]['id'], combined_list) self.assertIn(role_list[2]['id'], combined_list) self.assertIn(role_list[3]['id'], combined_list) # TODO(henry-nash): The test above uses get_roles_for_user_and_project # which will, in a subsequent patch, be re-implemented to simply call # list_role_assignments (see blueprint remove-role-metadata). # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. Once # get_roles_for_user_and_project has been re-implemented then the # manual tests above can be refactored to simply ensure it gives # the same answers. 
test_plan = { # A domain with a user and project, 2 groups, plus 4 roles. 'entities': {'domains': {'users': 1, 'projects': 1, 'groups': 2}, 'roles': 4}, 'group_memberships': [{'group': 0, 'users': [0]}, {'group': 1, 'users': [0]}], 'assignments': [{'user': 0, 'role': 0, 'project': 0}, {'group': 0, 'role': 1, 'domain': 0}, {'group': 1, 'role': 2, 'domain': 0, 'inherited_to_projects': True}, {'group': 1, 'role': 3, 'domain': 0, 'inherited_to_projects': True}], 'tests': [ # List all effective assignments for user[0] on project[0]. # Should get one direct role and both inherited roles, but # not the direct one on domain[0], even though user[0] is # in group[0]. {'params': {'user': 0, 'project': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 2, 'project': 0, 'indirect': {'domain': 0, 'group': 1}}, {'user': 0, 'role': 3, 'project': 0, 'indirect': {'domain': 0, 'group': 1}}]} ] } self.execute_assignment_plan(test_plan) def test_list_projects_for_user_with_inherited_grants(self): """Test inherited user roles. 
Test Plan: - Enable OS-INHERIT extension - Create a domain, with two projects and a user - Assign an inherited user role on the domain, as well as a direct user role to a separate project in a different domain - Get a list of projects for user, should return all three projects """ self.config_fixture.config(group='os_inherit', enabled=True) domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user1 = unit.new_user_ref(domain_id=domain['id']) user1 = self.identity_api.create_user(user1) project1 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project2['id'], project2) # Create 2 grants, one on a project and one inherited grant # on the domain self.assignment_api.create_grant(user_id=user1['id'], project_id=self.tenant_bar['id'], role_id=self.role_member['id']) self.assignment_api.create_grant(user_id=user1['id'], domain_id=domain['id'], role_id=self.role_admin['id'], inherited_to_projects=True) # Should get back all three projects, one by virtue of the direct # grant, plus both projects in the domain user_projects = self.assignment_api.list_projects_for_user(user1['id']) self.assertEqual(3, len(user_projects)) # TODO(henry-nash): The test above uses list_projects_for_user # which may, in a subsequent patch, be re-implemented to call # list_role_assignments and then report only the distinct projects. # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. Once list_projects_for_user # has been re-implemented then the manual tests above can be # refactored. test_plan = { # A domain with 1 project, plus a second domain with 2 projects, # as well as a user. Also, create 2 roles. 
'entities': {'domains': [{'projects': 1}, {'users': 1, 'projects': 2}], 'roles': 2}, 'assignments': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 1, 'domain': 1, 'inherited_to_projects': True}], 'tests': [ # List all effective assignments for user[0] # Should get one direct role plus one inherited role for each # project in domain {'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 1, 'project': 1, 'indirect': {'domain': 1}}, {'user': 0, 'role': 1, 'project': 2, 'indirect': {'domain': 1}}]} ] } self.execute_assignment_plan(test_plan) def test_list_projects_for_user_with_inherited_user_project_grants(self): """Test inherited role assignments for users on nested projects. Test Plan: - Enable OS-INHERIT extension - Create a hierarchy of projects with one root and one leaf project - Assign an inherited user role on root project - Assign a non-inherited user role on root project - Get a list of projects for user, should return both projects - Disable OS-INHERIT extension - Get a list of projects for user, should return only root project """ # Enable OS-INHERIT extension self.config_fixture.config(group='os_inherit', enabled=True) root_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) root_project = self.resource_api.create_project(root_project['id'], root_project) leaf_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, parent_id=root_project['id']) leaf_project = self.resource_api.create_project(leaf_project['id'], leaf_project) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) # Grant inherited user role self.assignment_api.create_grant(user_id=user['id'], project_id=root_project['id'], role_id=self.role_admin['id'], inherited_to_projects=True) # Grant non-inherited user role self.assignment_api.create_grant(user_id=user['id'], project_id=root_project['id'], 
role_id=self.role_member['id']) # Should get back both projects: because the direct role assignment for # the root project and inherited role assignment for leaf project user_projects = self.assignment_api.list_projects_for_user(user['id']) self.assertEqual(2, len(user_projects)) self.assertIn(root_project, user_projects) self.assertIn(leaf_project, user_projects) # Disable OS-INHERIT extension self.config_fixture.config(group='os_inherit', enabled=False) # Should get back just root project - due the direct role assignment user_projects = self.assignment_api.list_projects_for_user(user['id']) self.assertEqual(1, len(user_projects)) self.assertIn(root_project, user_projects) # TODO(henry-nash): The test above uses list_projects_for_user # which may, in a subsequent patch, be re-implemented to call # list_role_assignments and then report only the distinct projects. # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. Once list_projects_for_user # has been re-implemented then the manual tests above can be # refactored. test_plan = { # A domain with a project and sub-project, plus a user. # Also, create 2 roles. 'entities': { 'domains': {'id': CONF.identity.default_domain_id, 'users': 1, 'projects': {'project': 1}}, 'roles': 2}, # A direct role and an inherited role on the parent 'assignments': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 1, 'project': 0, 'inherited_to_projects': True}], 'tests': [ # List all effective assignments for user[0] - should get back # one direct role plus one inherited role. {'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 1, 'project': 1, 'indirect': {'project': 0}}]} ] } test_plan_with_os_inherit_disabled = { 'tests': [ # List all effective assignments for user[0] - should only get # back the one direct role. 
{'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 0}]} ] } self.config_fixture.config(group='os_inherit', enabled=True) test_data = self.execute_assignment_plan(test_plan) self.config_fixture.config(group='os_inherit', enabled=False) # Pass the existing test data in to allow execution of 2nd test plan self.execute_assignment_cases( test_plan_with_os_inherit_disabled, test_data) def test_list_projects_for_user_with_inherited_group_grants(self): """Test inherited group roles. Test Plan: - Enable OS-INHERIT extension - Create two domains, each with two projects - Create a user and group - Make the user a member of the group - Assign a user role two projects, an inherited group role to one domain and an inherited regular role on the other domain - Get a list of projects for user, should return both pairs of projects from the domain, plus the one separate project """ self.config_fixture.config(group='os_inherit', enabled=True) domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) project1 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project2['id'], project2) project3 = unit.new_project_ref(domain_id=domain2['id']) self.resource_api.create_project(project3['id'], project3) project4 = unit.new_project_ref(domain_id=domain2['id']) self.resource_api.create_project(project4['id'], project4) user1 = unit.new_user_ref(domain_id=domain['id']) user1 = self.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = self.identity_api.create_group(group1) self.identity_api.add_user_to_group(user1['id'], group1['id']) # Create 4 grants: # - one user grant on a project in domain2 # - one user grant on a project in the default domain # - one 
inherited user grant on domain # - one inherited group grant on domain2 self.assignment_api.create_grant(user_id=user1['id'], project_id=project3['id'], role_id=self.role_member['id']) self.assignment_api.create_grant(user_id=user1['id'], project_id=self.tenant_bar['id'], role_id=self.role_member['id']) self.assignment_api.create_grant(user_id=user1['id'], domain_id=domain['id'], role_id=self.role_admin['id'], inherited_to_projects=True) self.assignment_api.create_grant(group_id=group1['id'], domain_id=domain2['id'], role_id=self.role_admin['id'], inherited_to_projects=True) # Should get back all five projects, but without a duplicate for # project3 (since it has both a direct user role and an inherited role) user_projects = self.assignment_api.list_projects_for_user(user1['id']) self.assertEqual(5, len(user_projects)) # TODO(henry-nash): The test above uses list_projects_for_user # which may, in a subsequent patch, be re-implemented to call # list_role_assignments and then report only the distinct projects. # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. Once list_projects_for_user # has been re-implemented then the manual tests above can be # refactored. test_plan = { # A domain with a 1 project, plus a second domain with 2 projects, # as well as a user & group and a 3rd domain with 2 projects. # Also, created 2 roles. 'entities': {'domains': [{'projects': 1}, {'users': 1, 'groups': 1, 'projects': 2}, {'projects': 2}], 'roles': 2}, 'group_memberships': [{'group': 0, 'users': [0]}], 'assignments': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 0, 'project': 3}, {'user': 0, 'role': 1, 'domain': 1, 'inherited_to_projects': True}, {'user': 0, 'role': 1, 'domain': 2, 'inherited_to_projects': True}], 'tests': [ # List all effective assignments for user[0] # Should get back both direct roles plus roles on both projects # from each domain. Duplicates should not be filtered out. 
{'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 3}, {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 1, 'project': 1, 'indirect': {'domain': 1}}, {'user': 0, 'role': 1, 'project': 2, 'indirect': {'domain': 1}}, {'user': 0, 'role': 1, 'project': 3, 'indirect': {'domain': 2}}, {'user': 0, 'role': 1, 'project': 4, 'indirect': {'domain': 2}}]} ] } self.execute_assignment_plan(test_plan) def test_list_projects_for_user_with_inherited_group_project_grants(self): """Test inherited role assignments for groups on nested projects. Test Plan: - Enable OS-INHERIT extension - Create a hierarchy of projects with one root and one leaf project - Assign an inherited group role on root project - Assign a non-inherited group role on root project - Get a list of projects for user, should return both projects - Disable OS-INHERIT extension - Get a list of projects for user, should return only root project """ self.config_fixture.config(group='os_inherit', enabled=True) root_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) root_project = self.resource_api.create_project(root_project['id'], root_project) leaf_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, parent_id=root_project['id']) leaf_project = self.resource_api.create_project(leaf_project['id'], leaf_project) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) self.identity_api.add_user_to_group(user['id'], group['id']) # Grant inherited group role self.assignment_api.create_grant(group_id=group['id'], project_id=root_project['id'], role_id=self.role_admin['id'], inherited_to_projects=True) # Grant non-inherited group role self.assignment_api.create_grant(group_id=group['id'], project_id=root_project['id'], role_id=self.role_member['id']) # Should get back 
both projects: because the direct role assignment for # the root project and inherited role assignment for leaf project user_projects = self.assignment_api.list_projects_for_user(user['id']) self.assertEqual(2, len(user_projects)) self.assertIn(root_project, user_projects) self.assertIn(leaf_project, user_projects) # Disable OS-INHERIT extension self.config_fixture.config(group='os_inherit', enabled=False) # Should get back just root project - due the direct role assignment user_projects = self.assignment_api.list_projects_for_user(user['id']) self.assertEqual(1, len(user_projects)) self.assertIn(root_project, user_projects) # TODO(henry-nash): The test above uses list_projects_for_user # which may, in a subsequent patch, be re-implemented to call # list_role_assignments and then report only the distinct projects. # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. Once list_projects_for_user # has been re-implemented then the manual tests above can be # refactored. test_plan = { # A domain with a project ans sub-project, plus a user. # Also, create 2 roles. 'entities': { 'domains': {'id': CONF.identity.default_domain_id, 'users': 1, 'groups': 1, 'projects': {'project': 1}}, 'roles': 2}, 'group_memberships': [{'group': 0, 'users': [0]}], # A direct role and an inherited role on the parent 'assignments': [{'group': 0, 'role': 0, 'project': 0}, {'group': 0, 'role': 1, 'project': 0, 'inherited_to_projects': True}], 'tests': [ # List all effective assignments for user[0] - should get back # one direct role plus one inherited role. {'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 0, 'indirect': {'group': 0}}, {'user': 0, 'role': 1, 'project': 1, 'indirect': {'group': 0, 'project': 0}}]} ] } test_plan_with_os_inherit_disabled = { 'tests': [ # List all effective assignments for user[0] - should only get # back the one direct role. 
{'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 0, 'indirect': {'group': 0}}]} ] } self.config_fixture.config(group='os_inherit', enabled=True) test_data = self.execute_assignment_plan(test_plan) self.config_fixture.config(group='os_inherit', enabled=False) # Pass the existing test data in to allow execution of 2nd test plan self.execute_assignment_cases( test_plan_with_os_inherit_disabled, test_data) def test_list_assignments_for_tree(self): """Test we correctly list direct assignments for a tree""" # Enable OS-INHERIT extension self.config_fixture.config(group='os_inherit', enabled=True) test_plan = { # Create a domain with a project hierarchy 3 levels deep: # # project 0 # ____________|____________ # | | # project 1 project 4 # ______|_____ ______|_____ # | | | | # project 2 project 3 project 5 project 6 # # Also, create 1 user and 4 roles. 'entities': { 'domains': { 'projects': {'project': [{'project': 2}, {'project': 2}]}, 'users': 1}, 'roles': 4}, 'assignments': [ # Direct assignment to projects 1 and 2 {'user': 0, 'role': 0, 'project': 1}, {'user': 0, 'role': 1, 'project': 2}, # Also an inherited assignment on project 1 {'user': 0, 'role': 2, 'project': 1, 'inherited_to_projects': True}, # ...and two spoiler assignments, one to the root and one # to project 4 {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 3, 'project': 4}], 'tests': [ # List all assignments for project 1 and its subtree. 
{'params': {'project': 1, 'include_subtree': True}, 'results': [ # Only the actual assignments should be returned, no # expansion of inherited assignments {'user': 0, 'role': 0, 'project': 1}, {'user': 0, 'role': 1, 'project': 2}, {'user': 0, 'role': 2, 'project': 1, 'inherited_to_projects': 'projects'}]} ] } self.execute_assignment_plan(test_plan) def test_list_effective_assignments_for_tree(self): """Test we correctly list effective assignments for a tree""" # Enable OS-INHERIT extension self.config_fixture.config(group='os_inherit', enabled=True) test_plan = { # Create a domain with a project hierarchy 3 levels deep: # # project 0 # ____________|____________ # | | # project 1 project 4 # ______|_____ ______|_____ # | | | | # project 2 project 3 project 5 project 6 # # Also, create 1 user and 4 roles. 'entities': { 'domains': { 'projects': {'project': [{'project': 2}, {'project': 2}]}, 'users': 1}, 'roles': 4}, 'assignments': [ # An inherited assignment on project 1 {'user': 0, 'role': 1, 'project': 1, 'inherited_to_projects': True}, # A direct assignment to project 2 {'user': 0, 'role': 2, 'project': 2}, # ...and two spoiler assignments, one to the root and one # to project 4 {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 3, 'project': 4}], 'tests': [ # List all effective assignments for project 1 and its subtree. {'params': {'project': 1, 'effective': True, 'include_subtree': True}, 'results': [ # The inherited assignment on project 1 should appear only # on its children {'user': 0, 'role': 1, 'project': 2, 'indirect': {'project': 1}}, {'user': 0, 'role': 1, 'project': 3, 'indirect': {'project': 1}}, # And finally the direct assignment on project 2 {'user': 0, 'role': 2, 'project': 2}]} ] } self.execute_assignment_plan(test_plan) def test_list_effective_assignments_for_tree_with_mixed_assignments(self): """Test that we correctly combine assignments for a tree. 
In this test we want to ensure that when asking for a list of assignments in a subtree, any assignments inherited from above the subtree are correctly combined with any assignments within the subtree itself. """ # Enable OS-INHERIT extension self.config_fixture.config(group='os_inherit', enabled=True) test_plan = { # Create a domain with a project hierarchy 3 levels deep: # # project 0 # ____________|____________ # | | # project 1 project 4 # ______|_____ ______|_____ # | | | | # project 2 project 3 project 5 project 6 # # Also, create 2 users, 1 group and 4 roles. 'entities': { 'domains': { 'projects': {'project': [{'project': 2}, {'project': 2}]}, 'users': 2, 'groups': 1}, 'roles': 4}, # Both users are part of the same group 'group_memberships': [{'group': 0, 'users': [0, 1]}], # We are going to ask for listing of assignment on project 1 and # it's subtree. So first we'll add two inherited assignments above # this (one user and one for a group that contains this user). 'assignments': [{'user': 0, 'role': 0, 'project': 0, 'inherited_to_projects': True}, {'group': 0, 'role': 1, 'project': 0, 'inherited_to_projects': True}, # Now an inherited assignment on project 1 itself, # which should ONLY show up on its children {'user': 0, 'role': 2, 'project': 1, 'inherited_to_projects': True}, # ...and a direct assignment on one of those # children {'user': 0, 'role': 3, 'project': 2}, # The rest are spoiler assignments {'user': 0, 'role': 2, 'project': 5}, {'user': 0, 'role': 3, 'project': 4}], 'tests': [ # List all effective assignments for project 1 and its subtree. 
{'params': {'project': 1, 'user': 0, 'effective': True, 'include_subtree': True}, 'results': [ # First, we should see the inherited user assignment from # project 0 on all projects in the subtree {'user': 0, 'role': 0, 'project': 1, 'indirect': {'project': 0}}, {'user': 0, 'role': 0, 'project': 2, 'indirect': {'project': 0}}, {'user': 0, 'role': 0, 'project': 3, 'indirect': {'project': 0}}, # Also the inherited group assignment from project 0 on # the subtree {'user': 0, 'role': 1, 'project': 1, 'indirect': {'project': 0, 'group': 0}}, {'user': 0, 'role': 1, 'project': 2, 'indirect': {'project': 0, 'group': 0}}, {'user': 0, 'role': 1, 'project': 3, 'indirect': {'project': 0, 'group': 0}}, # The inherited assignment on project 1 should appear only # on its children {'user': 0, 'role': 2, 'project': 2, 'indirect': {'project': 1}}, {'user': 0, 'role': 2, 'project': 3, 'indirect': {'project': 1}}, # And finally the direct assignment on project 2 {'user': 0, 'role': 3, 'project': 2}]} ] } self.execute_assignment_plan(test_plan) def test_list_effective_assignments_for_tree_with_domain_assignments(self): """Test we correctly honor domain inherited assignments on the tree""" # Enable OS-INHERIT extension self.config_fixture.config(group='os_inherit', enabled=True) test_plan = { # Create a domain with a project hierarchy 3 levels deep: # # project 0 # ____________|____________ # | | # project 1 project 4 # ______|_____ ______|_____ # | | | | # project 2 project 3 project 5 project 6 # # Also, create 1 user and 4 roles. 
'entities': { 'domains': { 'projects': {'project': [{'project': 2}, {'project': 2}]}, 'users': 1}, 'roles': 4}, 'assignments': [ # An inherited assignment on the domain (which should be # applied to all the projects) {'user': 0, 'role': 1, 'domain': 0, 'inherited_to_projects': True}, # A direct assignment to project 2 {'user': 0, 'role': 2, 'project': 2}, # ...and two spoiler assignments, one to the root and one # to project 4 {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 3, 'project': 4}], 'tests': [ # List all effective assignments for project 1 and its subtree. {'params': {'project': 1, 'effective': True, 'include_subtree': True}, 'results': [ # The inherited assignment from the domain should appear # only on the part of the subtree we are interested in {'user': 0, 'role': 1, 'project': 1, 'indirect': {'domain': 0}}, {'user': 0, 'role': 1, 'project': 2, 'indirect': {'domain': 0}}, {'user': 0, 'role': 1, 'project': 3, 'indirect': {'domain': 0}}, # And finally the direct assignment on project 2 {'user': 0, 'role': 2, 'project': 2}]} ] } self.execute_assignment_plan(test_plan) def test_list_user_ids_for_project_with_inheritance(self): test_plan = { # A domain with a project and sub-project, plus four users, # two groups, as well as 4 roles. 'entities': { 'domains': {'id': CONF.identity.default_domain_id, 'users': 4, 'groups': 2, 'projects': {'project': 1}}, 'roles': 4}, # Each group has a unique user member 'group_memberships': [{'group': 0, 'users': [1]}, {'group': 1, 'users': [3]}], # Set up assignments so that there should end up with four # effective assignments on project 1 - one direct, one due to # group membership and one user assignment inherited from the # parent and one group assignment inhertied from the parent. 
'assignments': [{'user': 0, 'role': 0, 'project': 1}, {'group': 0, 'role': 1, 'project': 1}, {'user': 2, 'role': 2, 'project': 0, 'inherited_to_projects': True}, {'group': 1, 'role': 3, 'project': 0, 'inherited_to_projects': True}], } # Use assignment plan helper to create all the entities and # assignments - then we'll run our own tests using the data test_data = self.execute_assignment_plan(test_plan) self.config_fixture.config(group='os_inherit', enabled=True) user_ids = self.assignment_api.list_user_ids_for_project( test_data['projects'][1]['id']) self.assertThat(user_ids, matchers.HasLength(4)) for x in range(0, 4): self.assertIn(test_data['users'][x]['id'], user_ids) def test_list_role_assignment_using_inherited_sourced_groups(self): """Test listing inherited assignments when restricted by groups.""" test_plan = { # A domain with 3 users, 3 groups, 3 projects, a second domain, # plus 3 roles. 'entities': {'domains': [{'users': 3, 'groups': 3, 'projects': 3}, 1], 'roles': 3}, # Users 0 & 1 are in the group 0, User 0 also in group 1 'group_memberships': [{'group': 0, 'users': [0, 1]}, {'group': 1, 'users': [0]}], # Spread the assignments around - we want to be able to show that # if sourced by group, assignments from other sources are excluded 'assignments': [{'user': 0, 'role': 0, 'domain': 0}, {'group': 0, 'role': 1, 'domain': 1}, {'group': 1, 'role': 2, 'domain': 0, 'inherited_to_projects': True}, {'group': 1, 'role': 2, 'project': 1}, {'user': 2, 'role': 1, 'project': 1, 'inherited_to_projects': True}, {'group': 2, 'role': 2, 'project': 2} ], 'tests': [ # List all effective assignments sourced from groups 0 and 1. # We should see the inherited group assigned on the 3 projects # from domain 0, as well as the direct assignments. 
{'params': {'source_from_group_ids': [0, 1],
            'effective': True},
 'results': [{'group': 0, 'role': 1, 'domain': 1},
             {'group': 1, 'role': 2, 'project': 0,
              'indirect': {'domain': 0}},
             {'group': 1, 'role': 2, 'project': 1,
              'indirect': {'domain': 0}},
             {'group': 1, 'role': 2, 'project': 2,
              'indirect': {'domain': 0}},
             {'group': 1, 'role': 2, 'project': 1}
             ]},
            ]
}
self.execute_assignment_plan(test_plan)


class ImpliedRoleTests(AssignmentTestHelperMixin):

    def test_implied_role_crd(self):
        # CRD round-trip of an implied-role rule: create two roles, link
        # them as prior -> implied, read the link back, then delete it.
        prior_role_ref = unit.new_role_ref()
        self.role_api.create_role(prior_role_ref['id'], prior_role_ref)
        implied_role_ref = unit.new_role_ref()
        self.role_api.create_role(implied_role_ref['id'], implied_role_ref)

        self.role_api.create_implied_role(
            prior_role_ref['id'], implied_role_ref['id'])
        implied_role = self.role_api.get_implied_role(
            prior_role_ref['id'], implied_role_ref['id'])
        expected_implied_role_ref = {
            'prior_role_id': prior_role_ref['id'],
            'implied_role_id': implied_role_ref['id']}
        self.assertDictContainsSubset(
            expected_implied_role_ref,
            implied_role)

        self.role_api.delete_implied_role(
            prior_role_ref['id'], implied_role_ref['id'])
        # After deletion, looking up any (unknown) pairing raises
        # ImpliedRoleNotFound.
        self.assertRaises(exception.ImpliedRoleNotFound,
                          self.role_api.get_implied_role,
                          uuid.uuid4().hex,
                          uuid.uuid4().hex)

    def test_delete_implied_role_returns_not_found(self):
        # Deleting a rule that was never created raises ImpliedRoleNotFound.
        self.assertRaises(exception.ImpliedRoleNotFound,
                          self.role_api.delete_implied_role,
                          uuid.uuid4().hex,
                          uuid.uuid4().hex)

    def test_role_assignments_simple_tree_of_implied_roles(self):
        """Test that implied roles are expanded out."""
        test_plan = {
            'entities': {'domains': {'users': 1, 'projects': 1},
                         'roles': 4},
            # Three level tree of implied roles
            'implied_roles': [{'role': 0, 'implied_roles': 1},
                              {'role': 1, 'implied_roles': [2, 3]}],
            'assignments': [{'user': 0, 'role': 0, 'project': 0}],
            'tests': [
                # List all direct assignments for user[0], this should just
                # show the one top level role assignment
                {'params': {'user': 0},
                 'results': [{'user': 0, 'role': 0, 'project': 0}]},
                # Listing in
effective mode should show the implied roles # expanded out {'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 1, 'project': 0, 'indirect': {'role': 0}}, {'user': 0, 'role': 2, 'project': 0, 'indirect': {'role': 1}}, {'user': 0, 'role': 3, 'project': 0, 'indirect': {'role': 1}}]}, ] } self.execute_assignment_plan(test_plan) def test_circular_inferences(self): """Test that implied roles are expanded out.""" test_plan = { 'entities': {'domains': {'users': 1, 'projects': 1}, 'roles': 4}, # Three level tree of implied roles 'implied_roles': [{'role': 0, 'implied_roles': [1]}, {'role': 1, 'implied_roles': [2, 3]}, {'role': 3, 'implied_roles': [0]}], 'assignments': [{'user': 0, 'role': 0, 'project': 0}], 'tests': [ # List all direct assignments for user[0], this should just # show the one top level role assignment {'params': {'user': 0}, 'results': [{'user': 0, 'role': 0, 'project': 0}]}, # Listing in effective mode should show the implied roles # expanded out {'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 0, 'project': 0, 'indirect': {'role': 3}}, {'user': 0, 'role': 1, 'project': 0, 'indirect': {'role': 0}}, {'user': 0, 'role': 2, 'project': 0, 'indirect': {'role': 1}}, {'user': 0, 'role': 3, 'project': 0, 'indirect': {'role': 1}}]}, ] } self.execute_assignment_plan(test_plan) def test_role_assignments_directed_graph_of_implied_roles(self): """Test that a role can have multiple, different prior roles.""" test_plan = { 'entities': {'domains': {'users': 1, 'projects': 1}, 'roles': 6}, # Three level tree of implied roles, where one of the roles at the # bottom is implied by more than one top level role 'implied_roles': [{'role': 0, 'implied_roles': [1, 2]}, {'role': 1, 'implied_roles': [3, 4]}, {'role': 5, 'implied_roles': 4}], # The user gets both top level roles 'assignments': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 5, 
'project': 0}], 'tests': [ # The implied roles should be expanded out and there should be # two entries for the role that had two different prior roles. {'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 5, 'project': 0}, {'user': 0, 'role': 1, 'project': 0, 'indirect': {'role': 0}}, {'user': 0, 'role': 2, 'project': 0, 'indirect': {'role': 0}}, {'user': 0, 'role': 3, 'project': 0, 'indirect': {'role': 1}}, {'user': 0, 'role': 4, 'project': 0, 'indirect': {'role': 1}}, {'user': 0, 'role': 4, 'project': 0, 'indirect': {'role': 5}}]}, ] } test_data = self.execute_assignment_plan(test_plan) # We should also be able to get a similar (yet summarized) answer to # the above by calling get_roles_for_user_and_project(), which should # list the role_ids, yet remove any duplicates role_ids = self.assignment_api.get_roles_for_user_and_project( test_data['users'][0]['id'], test_data['projects'][0]['id']) # We should see 6 entries, not 7, since role index 5 appeared twice in # the answer from list_role_assignments self.assertThat(role_ids, matchers.HasLength(6)) for x in range(0, 5): self.assertIn(test_data['roles'][x]['id'], role_ids) def test_role_assignments_implied_roles_filtered_by_role(self): """Test that you can filter by role even if roles are implied.""" test_plan = { 'entities': {'domains': {'users': 1, 'projects': 2}, 'roles': 4}, # Three level tree of implied roles 'implied_roles': [{'role': 0, 'implied_roles': 1}, {'role': 1, 'implied_roles': [2, 3]}], 'assignments': [{'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 3, 'project': 1}], 'tests': [ # List effective roles filtering by one of the implied roles, # showing that the filter was implied post expansion of # implied roles (and that non impled roles are included in # the filter {'params': {'role': 3, 'effective': True}, 'results': [{'user': 0, 'role': 3, 'project': 0, 'indirect': {'role': 1}}, {'user': 0, 'role': 3, 'project': 1}]}, ] } 
self.execute_assignment_plan(test_plan)

def test_role_assignments_simple_tree_of_implied_roles_on_domain(self):
    """Test that implied roles are expanded out when placed on a domain."""
    test_plan = {
        'entities': {'domains': {'users': 1},
                     'roles': 4},
        # Three level tree of implied roles
        'implied_roles': [{'role': 0, 'implied_roles': 1},
                          {'role': 1, 'implied_roles': [2, 3]}],
        'assignments': [{'user': 0, 'role': 0, 'domain': 0}],
        'tests': [
            # List all direct assignments for user[0], this should just
            # show the one top level role assignment
            {'params': {'user': 0},
             'results': [{'user': 0, 'role': 0, 'domain': 0}]},
            # Listing in effective mode should show the implied roles
            # expanded out
            {'params': {'user': 0, 'effective': True},
             'results': [{'user': 0, 'role': 0, 'domain': 0},
                         {'user': 0, 'role': 1, 'domain': 0,
                          'indirect': {'role': 0}},
                         {'user': 0, 'role': 2, 'domain': 0,
                          'indirect': {'role': 1}},
                         {'user': 0, 'role': 3, 'domain': 0,
                          'indirect': {'role': 1}}]},
        ]
    }
    self.execute_assignment_plan(test_plan)

def test_role_assignments_inherited_implied_roles(self):
    """Test that you can intermix inherited and implied roles."""
    test_plan = {
        'entities': {'domains': {'users': 1, 'projects': 1},
                     'roles': 4},
        # Simply one level of implied roles
        'implied_roles': [{'role': 0, 'implied_roles': 1}],
        # Assign to top level role as an inherited assignment to the
        # domain
        'assignments': [{'user': 0, 'role': 0, 'domain': 0,
                         'inherited_to_projects': True}],
        'tests': [
            # List all direct assignments for user[0], this should just
            # show the one top level role assignment
            {'params': {'user': 0},
             'results': [{'user': 0, 'role': 0, 'domain': 0,
                          'inherited_to_projects': 'projects'}]},
            # List in effective mode - we should only see the initial and
            # implied role on the project (since inherited roles are not
            # active on their anchor point).
{'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 0, 'project': 0, 'indirect': {'domain': 0}}, {'user': 0, 'role': 1, 'project': 0, 'indirect': {'domain': 0, 'role': 0}}]}, ] } self.config_fixture.config(group='os_inherit', enabled=True) self.execute_assignment_plan(test_plan) def test_role_assignments_domain_specific_with_implied_roles(self): test_plan = { 'entities': {'domains': {'users': 1, 'projects': 1, 'roles': 2}, 'roles': 2}, # Two level tree of implied roles, with the top and 1st level being # domain specific roles, and the bottom level being infered global # roles. 'implied_roles': [{'role': 0, 'implied_roles': [1]}, {'role': 1, 'implied_roles': [2, 3]}], 'assignments': [{'user': 0, 'role': 0, 'project': 0}], 'tests': [ # List all direct assignments for user[0], this should just # show the one top level role assignment, even though this is a # domain specific role (since we are in non-effective mode and # we show any direct role assignment in that mode). {'params': {'user': 0}, 'results': [{'user': 0, 'role': 0, 'project': 0}]}, # Now the effective ones - so the implied roles should be # expanded out, as well as any domain specific roles should be # removed. {'params': {'user': 0, 'effective': True}, 'results': [{'user': 0, 'role': 2, 'project': 0, 'indirect': {'role': 1}}, {'user': 0, 'role': 3, 'project': 0, 'indirect': {'role': 1}}]}, ] } self.execute_assignment_plan(test_plan) keystone-9.0.0/keystone/tests/unit/resource/0000775000567000056710000000000012701407246022336 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/resource/backends/0000775000567000056710000000000012701407246024110 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/resource/backends/test_sql.py0000664000567000056710000000174312701407102026314 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.resource.backends import sql from keystone.tests import unit from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.resource import test_backends class TestSqlResourceDriver(unit.BaseTestCase, test_backends.ResourceDriverTests): def setUp(self): super(TestSqlResourceDriver, self).setUp() self.useFixture(database.Database()) self.driver = sql.Resource() keystone-9.0.0/keystone/tests/unit/resource/backends/__init__.py0000664000567000056710000000000012701407102026176 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/resource/test_controllers.py0000664000567000056710000000401012701407102026277 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid

from oslo_config import cfg

from keystone import exception
from keystone.resource import controllers
from keystone.tests import unit
from keystone.tests.unit.ksfixtures import database

CONF = cfg.CONF

# Minimal admin request context passed to the v2 Tenant controller calls
# below.
_ADMIN_CONTEXT = {'is_admin': True, 'query_string': {}}


class TenantTestCaseNoDefaultDomain(unit.TestCase):
    # Tests of the v2 Tenant controller behavior when the default domain
    # does not exist in the backend.

    def setUp(self):
        super(TenantTestCaseNoDefaultDomain, self).setUp()
        self.useFixture(database.Database())
        self.load_backends()
        self.tenant_controller = controllers.Tenant()

    def test_setup(self):
        # Other tests in this class assume there's no default domain, so make
        # sure the setUp worked as expected.
        self.assertRaises(
            exception.DomainNotFound,
            self.resource_api.get_domain,
            CONF.identity.default_domain_id)

    def test_get_all_projects(self):
        # When get_all_projects is done and there's no default domain, the
        # result is an empty list.
        res = self.tenant_controller.get_all_projects(_ADMIN_CONTEXT)
        self.assertEqual([], res['tenants'])

    def test_create_project(self):
        # When a project is created using the v2 controller and there's no
        # default domain, it doesn't fail with can't find domain (a default
        # domain is created)
        tenant = {'name': uuid.uuid4().hex}
        self.tenant_controller.create_project(_ADMIN_CONTEXT, tenant)
        # If the above doesn't fail then this is successful.

keystone-9.0.0/keystone/tests/unit/resource/__init__.py0000664000567000056710000000000012701407102024424 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/resource/test_core.py0000664000567000056710000007315412701407105024703 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid import mock from testtools import matchers from oslo_config import cfg from oslotest import mockpatch from keystone import exception from keystone.tests import unit from keystone.tests.unit.ksfixtures import database CONF = cfg.CONF class TestResourceManagerNoFixtures(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): super(TestResourceManagerNoFixtures, self).setUp() self.useFixture(database.Database(self.sql_driver_version_overrides)) self.load_backends() def test_ensure_default_domain_exists(self): # When there's no default domain, ensure_default_domain_exists creates # it. # First make sure there's no default domain. self.assertRaises( exception.DomainNotFound, self.resource_api.get_domain, CONF.identity.default_domain_id) self.resource_api.ensure_default_domain_exists() default_domain = self.resource_api.get_domain( CONF.identity.default_domain_id) expected_domain = { 'id': CONF.identity.default_domain_id, 'name': 'Default', 'enabled': True, 'description': 'Domain created automatically to support V2.0 ' 'operations.', } self.assertEqual(expected_domain, default_domain) def test_ensure_default_domain_exists_already_exists(self): # When there's already a default domain, ensure_default_domain_exists # doesn't do anything. 
name = uuid.uuid4().hex
description = uuid.uuid4().hex
domain_attrs = {
    'id': CONF.identity.default_domain_id,
    'name': name,
    'description': description,
}
self.resource_api.create_domain(CONF.identity.default_domain_id,
                                domain_attrs)
self.resource_api.ensure_default_domain_exists()
default_domain = self.resource_api.get_domain(
    CONF.identity.default_domain_id)
# The pre-existing domain must be returned unchanged (note 'enabled' is
# expected True even though it was not in domain_attrs).
expected_domain = {
    'id': CONF.identity.default_domain_id,
    'name': name,
    'enabled': True,
    'description': description,
}
self.assertEqual(expected_domain, default_domain)

def test_ensure_default_domain_exists_fails(self):
    # When there's an unexpected exception creating domain it's passed on.
    self.useFixture(mockpatch.PatchObject(
        self.resource_api, 'create_domain',
        side_effect=exception.UnexpectedError))
    self.assertRaises(exception.UnexpectedError,
                      self.resource_api.ensure_default_domain_exists)


class DomainConfigDriverTests(object):
    # Mixin of driver-level domain-config tests; the concrete test class is
    # expected to provide self.driver.

    def _domain_config_crud(self, sensitive):
        # Create / read / update / delete a single config option, for
        # either the sensitive or the whitelisted storage path.
        domain = uuid.uuid4().hex
        group = uuid.uuid4().hex
        option = uuid.uuid4().hex
        value = uuid.uuid4().hex
        self.driver.create_config_option(
            domain, group, option, value, sensitive)
        res = self.driver.get_config_option(
            domain, group, option, sensitive)
        config = {'group': group, 'option': option, 'value': value}
        self.assertEqual(config, res)

        value = uuid.uuid4().hex
        self.driver.update_config_option(
            domain, group, option, value, sensitive)
        res = self.driver.get_config_option(
            domain, group, option, sensitive)
        config = {'group': group, 'option': option, 'value': value}
        self.assertEqual(config, res)

        self.driver.delete_config_options(
            domain, group, option, sensitive)
        self.assertRaises(exception.DomainConfigNotFound,
                          self.driver.get_config_option,
                          domain, group, option, sensitive)
        # ...and silent if we try to delete it again
        self.driver.delete_config_options(
            domain, group, option, sensitive)

    def test_whitelisted_domain_config_crud(self):
        self._domain_config_crud(sensitive=False)

    def test_sensitive_domain_config_crud(self):
self._domain_config_crud(sensitive=True) def _list_domain_config(self, sensitive): """Test listing by combination of domain, group & option.""" config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex} # Put config2 in the same group as config1 config2 = {'group': config1['group'], 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex} config3 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': 100} domain = uuid.uuid4().hex for config in [config1, config2, config3]: self.driver.create_config_option( domain, config['group'], config['option'], config['value'], sensitive) # Try listing all items from a domain res = self.driver.list_config_options( domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(3)) for res_entry in res: self.assertIn(res_entry, [config1, config2, config3]) # Try listing by domain and group res = self.driver.list_config_options( domain, group=config1['group'], sensitive=sensitive) self.assertThat(res, matchers.HasLength(2)) for res_entry in res: self.assertIn(res_entry, [config1, config2]) # Try listing by domain, group and option res = self.driver.list_config_options( domain, group=config2['group'], option=config2['option'], sensitive=sensitive) self.assertThat(res, matchers.HasLength(1)) self.assertEqual(config2, res[0]) def test_list_whitelisted_domain_config_crud(self): self._list_domain_config(False) def test_list_sensitive_domain_config_crud(self): self._list_domain_config(True) def _delete_domain_configs(self, sensitive): """Test deleting by combination of domain, group & option.""" config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex} # Put config2 and config3 in the same group as config1 config2 = {'group': config1['group'], 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex} config3 = {'group': config1['group'], 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex} config4 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 
'value': uuid.uuid4().hex} domain = uuid.uuid4().hex for config in [config1, config2, config3, config4]: self.driver.create_config_option( domain, config['group'], config['option'], config['value'], sensitive) # Try deleting by domain, group and option res = self.driver.delete_config_options( domain, group=config2['group'], option=config2['option'], sensitive=sensitive) res = self.driver.list_config_options( domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(3)) for res_entry in res: self.assertIn(res_entry, [config1, config3, config4]) # Try deleting by domain and group res = self.driver.delete_config_options( domain, group=config4['group'], sensitive=sensitive) res = self.driver.list_config_options( domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(2)) for res_entry in res: self.assertIn(res_entry, [config1, config3]) # Try deleting all items from a domain res = self.driver.delete_config_options( domain, sensitive=sensitive) res = self.driver.list_config_options( domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(0)) def test_delete_whitelisted_domain_configs(self): self._delete_domain_configs(False) def test_delete_sensitive_domain_configs(self): self._delete_domain_configs(True) def _create_domain_config_twice(self, sensitive): """Test conflict error thrown if create the same option twice.""" config = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex} domain = uuid.uuid4().hex self.driver.create_config_option( domain, config['group'], config['option'], config['value'], sensitive=sensitive) self.assertRaises(exception.Conflict, self.driver.create_config_option, domain, config['group'], config['option'], config['value'], sensitive=sensitive) def test_create_whitelisted_domain_config_twice(self): self._create_domain_config_twice(False) def test_create_sensitive_domain_config_twice(self): self._create_domain_config_twice(True) class DomainConfigTests(object): def setUp(self): 
self.domain = unit.new_domain_ref() self.resource_api.create_domain(self.domain['id'], self.domain) self.addCleanup(self.clean_up_domain) def clean_up_domain(self): # NOTE(henry-nash): Deleting the domain will also delete any domain # configs for this domain. self.domain['enabled'] = False self.resource_api.update_domain(self.domain['id'], self.domain) self.resource_api.delete_domain(self.domain['id']) del self.domain def test_create_domain_config_including_sensitive_option(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) # password is sensitive, so check that the whitelisted portion and # the sensitive piece have been stored in the appropriate locations. res = self.domain_config_api.get_config(self.domain['id']) config_whitelisted = copy.deepcopy(config) config_whitelisted['ldap'].pop('password') self.assertEqual(config_whitelisted, res) res = self.domain_config_api.driver.get_config_option( self.domain['id'], 'ldap', 'password', sensitive=True) self.assertEqual(config['ldap']['password'], res['value']) # Finally, use the non-public API to get back the whole config res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual(config, res) def test_get_partial_domain_config(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) res = self.domain_config_api.get_config(self.domain['id'], group='identity') config_partial = copy.deepcopy(config) config_partial.pop('ldap') self.assertEqual(config_partial, res) res = self.domain_config_api.get_config( self.domain['id'], group='ldap', option='user_tree_dn') self.assertEqual({'user_tree_dn': config['ldap']['user_tree_dn']}, res) # ...but we should fail to get a sensitive option 
self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.get_config, self.domain['id'], group='ldap', option='password') def test_delete_partial_domain_config(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) self.domain_config_api.delete_config( self.domain['id'], group='identity') config_partial = copy.deepcopy(config) config_partial.pop('identity') config_partial['ldap'].pop('password') res = self.domain_config_api.get_config(self.domain['id']) self.assertEqual(config_partial, res) self.domain_config_api.delete_config( self.domain['id'], group='ldap', option='url') config_partial = copy.deepcopy(config_partial) config_partial['ldap'].pop('url') res = self.domain_config_api.get_config(self.domain['id']) self.assertEqual(config_partial, res) def test_get_options_not_in_domain_config(self): self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.get_config, self.domain['id']) config = {'ldap': {'url': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.get_config, self.domain['id'], group='identity') self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.get_config, self.domain['id'], group='ldap', option='user_tree_dn') def test_get_sensitive_config(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual({}, res) self.domain_config_api.create_config(self.domain['id'], config) res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual(config, res) def test_update_partial_domain_config(self): config = {'ldap': 
{'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) # Try updating a group new_config = {'ldap': {'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex}} res = self.domain_config_api.update_config( self.domain['id'], new_config, group='ldap') expected_config = copy.deepcopy(config) expected_config['ldap']['url'] = new_config['ldap']['url'] expected_config['ldap']['user_filter'] = ( new_config['ldap']['user_filter']) expected_full_config = copy.deepcopy(expected_config) expected_config['ldap'].pop('password') res = self.domain_config_api.get_config(self.domain['id']) self.assertEqual(expected_config, res) # The sensitive option should still exist res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual(expected_full_config, res) # Try updating a single whitelisted option self.domain_config_api.delete_config(self.domain['id']) self.domain_config_api.create_config(self.domain['id'], config) new_config = {'url': uuid.uuid4().hex} res = self.domain_config_api.update_config( self.domain['id'], new_config, group='ldap', option='url') # Make sure whitelisted and full config is updated expected_whitelisted_config = copy.deepcopy(config) expected_whitelisted_config['ldap']['url'] = new_config['url'] expected_full_config = copy.deepcopy(expected_whitelisted_config) expected_whitelisted_config['ldap'].pop('password') self.assertEqual(expected_whitelisted_config, res) res = self.domain_config_api.get_config(self.domain['id']) self.assertEqual(expected_whitelisted_config, res) res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual(expected_full_config, res) # Try updating a single sensitive option self.domain_config_api.delete_config(self.domain['id']) self.domain_config_api.create_config(self.domain['id'], config) new_config = {'password': 
uuid.uuid4().hex} res = self.domain_config_api.update_config( self.domain['id'], new_config, group='ldap', option='password') # The whitelisted config should not have changed... expected_whitelisted_config = copy.deepcopy(config) expected_full_config = copy.deepcopy(config) expected_whitelisted_config['ldap'].pop('password') self.assertEqual(expected_whitelisted_config, res) res = self.domain_config_api.get_config(self.domain['id']) self.assertEqual(expected_whitelisted_config, res) expected_full_config['ldap']['password'] = new_config['password'] res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) # ...but the sensitive piece should have. self.assertEqual(expected_full_config, res) def test_update_invalid_partial_domain_config(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} # An extra group, when specifying one group should fail self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config, group='ldap') # An extra option, when specifying one option should fail self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config['ldap'], group='ldap', option='url') # Now try the right number of groups/options, but just not # ones that are in the config provided config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config, group='identity') self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config['ldap'], group='ldap', option='url') # Now some valid groups/options, but just not ones that are in the # existing config config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) config_wrong_group = {'identity': {'driver': 
uuid.uuid4().hex}} self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.update_config, self.domain['id'], config_wrong_group, group='identity') config_wrong_option = {'url': uuid.uuid4().hex} self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.update_config, self.domain['id'], config_wrong_option, group='ldap', option='url') # And finally just some bad groups/options bad_group = uuid.uuid4().hex config = {bad_group: {'user': uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config, group=bad_group, option='user') bad_option = uuid.uuid4().hex config = {'ldap': {bad_option: uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.update_config, self.domain['id'], config, group='ldap', option=bad_option) def test_create_invalid_domain_config(self): self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.create_config, self.domain['id'], {}) config = {uuid.uuid4().hex: uuid.uuid4().hex} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.create_config, self.domain['id'], config) config = {uuid.uuid4().hex: {uuid.uuid4().hex: uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.create_config, self.domain['id'], config) config = {'ldap': {uuid.uuid4().hex: uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.create_config, self.domain['id'], config) # Try an option that IS in the standard conf, but neither whitelisted # or marked as sensitive config = {'identity': {'user_tree_dn': uuid.uuid4().hex}} self.assertRaises(exception.InvalidDomainConfig, self.domain_config_api.create_config, self.domain['id'], config) def test_delete_invalid_partial_domain_config(self): config = {'ldap': {'url': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) # Try deleting a group not in the config 
self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.delete_config, self.domain['id'], group='identity') # Try deleting an option not in the config self.assertRaises(exception.DomainConfigNotFound, self.domain_config_api.delete_config, self.domain['id'], group='ldap', option='user_tree_dn') def test_sensitive_substitution_in_domain_config(self): # Create a config that contains a whitelisted option that requires # substitution of a sensitive option. config = {'ldap': {'url': 'my_url/%(password)s', 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) # Read back the config with the internal method and ensure that the # substitution has taken place. res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) expected_url = ( config['ldap']['url'] % {'password': config['ldap']['password']}) self.assertEqual(expected_url, res['ldap']['url']) def test_invalid_sensitive_substitution_in_domain_config(self): """Check that invalid substitutions raise warnings.""" mock_log = mock.Mock() invalid_option_config = { 'ldap': {'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} for invalid_option in ['my_url/%(passssword)s', 'my_url/%(password', 'my_url/%(password)', 'my_url/%(password)d']: invalid_option_config['ldap']['url'] = invalid_option self.domain_config_api.create_config( self.domain['id'], invalid_option_config) with mock.patch('keystone.resource.core.LOG', mock_log): res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) mock_log.warning.assert_any_call(mock.ANY) self.assertEqual( invalid_option_config['ldap']['url'], res['ldap']['url']) def test_escaped_sequence_in_domain_config(self): """Check that escaped '%(' doesn't get interpreted.""" mock_log = mock.Mock() escaped_option_config = { 'ldap': {'url': 'my_url/%%(password)s', 
'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config( self.domain['id'], escaped_option_config) with mock.patch('keystone.resource.core.LOG', mock_log): res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertFalse(mock_log.warn.called) # The escaping '%' should have been removed self.assertEqual('my_url/%(password)s', res['ldap']['url']) @unit.skip_if_cache_disabled('domain_config') def test_cache_layer_get_sensitive_config(self): config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) # cache the result res = self.domain_config_api.get_config_with_sensitive_info( self.domain['id']) self.assertEqual(config, res) # delete, bypassing domain config manager api self.domain_config_api.delete_config_options(self.domain['id']) self.domain_config_api.delete_config_options(self.domain['id'], sensitive=True) self.assertDictEqual( res, self.domain_config_api.get_config_with_sensitive_info( self.domain['id'])) self.domain_config_api.get_config_with_sensitive_info.invalidate( self.domain_config_api, self.domain['id']) self.assertDictEqual( {}, self.domain_config_api.get_config_with_sensitive_info( self.domain['id'])) def test_delete_domain_deletes_configs(self): """Test domain deletion clears the domain configs.""" domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex}} self.domain_config_api.create_config(domain['id'], config) # Now delete the domain domain['enabled'] = False self.resource_api.update_domain(domain['id'], domain) self.resource_api.delete_domain(domain['id']) # Check domain configs have also been deleted self.assertRaises( 
exception.DomainConfigNotFound, self.domain_config_api.get_config, domain['id']) # The get_config_with_sensitive_info does not throw an exception if # the config is empty, it just returns an empty dict self.assertDictEqual( {}, self.domain_config_api.get_config_with_sensitive_info( domain['id'])) def test_config_registration(self): type = uuid.uuid4().hex self.domain_config_api.obtain_registration( self.domain['id'], type) self.domain_config_api.release_registration( self.domain['id'], type=type) # Make sure that once someone has it, nobody else can get it. # This includes the domain who already has it. self.domain_config_api.obtain_registration( self.domain['id'], type) self.assertFalse( self.domain_config_api.obtain_registration( self.domain['id'], type)) # Make sure we can read who does have it self.assertEqual( self.domain['id'], self.domain_config_api.read_registration(type)) # Make sure releasing it is silent if the domain specified doesn't # have the registration domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.resource_api.create_domain(domain2['id'], domain2) self.domain_config_api.release_registration( domain2['id'], type=type) # If nobody has the type registered, then trying to read it should # raise ConfigRegistrationNotFound self.domain_config_api.release_registration( self.domain['id'], type=type) self.assertRaises(exception.ConfigRegistrationNotFound, self.domain_config_api.read_registration, type) # Finally check multiple registrations are cleared if you free the # registration without specifying the type type2 = uuid.uuid4().hex self.domain_config_api.obtain_registration( self.domain['id'], type) self.domain_config_api.obtain_registration( self.domain['id'], type2) self.domain_config_api.release_registration(self.domain['id']) self.assertRaises(exception.ConfigRegistrationNotFound, self.domain_config_api.read_registration, type) self.assertRaises(exception.ConfigRegistrationNotFound, self.domain_config_api.read_registration, type2) 
keystone-9.0.0/keystone/tests/unit/resource/test_backends.py0000664000567000056710000021667512701407102025531 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid import mock from oslo_config import cfg from six.moves import range from testtools import matchers from keystone.common import driver_hints from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import utils as test_utils CONF = cfg.CONF class ResourceTests(object): domain_count = len(default_fixtures.DOMAINS) def test_get_project(self): tenant_ref = self.resource_api.get_project(self.tenant_bar['id']) self.assertDictEqual(self.tenant_bar, tenant_ref) def test_get_project_returns_not_found(self): self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, uuid.uuid4().hex) def test_get_project_by_name(self): tenant_ref = self.resource_api.get_project_by_name( self.tenant_bar['name'], CONF.identity.default_domain_id) self.assertDictEqual(self.tenant_bar, tenant_ref) @unit.skip_if_no_multiple_domains_support def test_get_project_by_name_for_project_acting_as_a_domain(self): """Tests get_project_by_name works when the domain_id is None.""" project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, is_domain=False) project = self.resource_api.create_project(project['id'], project) self.assertRaises(exception.ProjectNotFound, 
self.resource_api.get_project_by_name, project['name'], None) # Test that querying with domain_id as None will find the project # acting as a domain, even if it's name is the same as the regular # project above. project2 = unit.new_project_ref(is_domain=True, name=project['name']) project2 = self.resource_api.create_project(project2['id'], project2) project_ref = self.resource_api.get_project_by_name( project2['name'], None) self.assertEqual(project2, project_ref) def test_get_project_by_name_returns_not_found(self): self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project_by_name, uuid.uuid4().hex, CONF.identity.default_domain_id) def test_create_duplicate_project_id_fails(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project_id = project['id'] self.resource_api.create_project(project_id, project) project['name'] = 'fake2' self.assertRaises(exception.Conflict, self.resource_api.create_project, project_id, project) def test_create_duplicate_project_name_fails(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project_id = project['id'] self.resource_api.create_project(project_id, project) project['id'] = 'fake2' self.assertRaises(exception.Conflict, self.resource_api.create_project, project['id'], project) def test_create_duplicate_project_name_in_different_domains(self): new_domain = unit.new_domain_ref() self.resource_api.create_domain(new_domain['id'], new_domain) project1 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project2 = unit.new_project_ref(name=project1['name'], domain_id=new_domain['id']) self.resource_api.create_project(project1['id'], project1) self.resource_api.create_project(project2['id'], project2) def test_move_project_between_domains(self): domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) project = 
unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project['id'], project) project['domain_id'] = domain2['id'] # Update the project asserting that a deprecation warning is emitted with mock.patch( 'oslo_log.versionutils.report_deprecated_feature') as mock_dep: self.resource_api.update_project(project['id'], project) self.assertTrue(mock_dep.called) updated_project_ref = self.resource_api.get_project(project['id']) self.assertEqual(domain2['id'], updated_project_ref['domain_id']) def test_move_project_between_domains_with_clashing_names_fails(self): domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) # First, create a project in domain1 project1 = unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project1['id'], project1) # Now create a project in domain2 with a potentially clashing # name - which should work since we have domain separation project2 = unit.new_project_ref(name=project1['name'], domain_id=domain2['id']) self.resource_api.create_project(project2['id'], project2) # Now try and move project1 into the 2nd domain - which should # fail since the names clash project1['domain_id'] = domain2['id'] self.assertRaises(exception.Conflict, self.resource_api.update_project, project1['id'], project1) @unit.skip_if_no_multiple_domains_support def test_move_project_with_children_between_domains_fails(self): domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) project = unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project['id'], project) child_project = unit.new_project_ref(domain_id=domain1['id'], parent_id=project['id']) self.resource_api.create_project(child_project['id'], child_project) project['domain_id'] = domain2['id'] # Update 
is not allowed, since updating the whole subtree would be # necessary self.assertRaises(exception.ValidationError, self.resource_api.update_project, project['id'], project) @unit.skip_if_no_multiple_domains_support def test_move_project_not_root_between_domains_fails(self): domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) project = unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project['id'], project) child_project = unit.new_project_ref(domain_id=domain1['id'], parent_id=project['id']) self.resource_api.create_project(child_project['id'], child_project) child_project['domain_id'] = domain2['id'] self.assertRaises(exception.ValidationError, self.resource_api.update_project, child_project['id'], child_project) @unit.skip_if_no_multiple_domains_support def test_move_root_project_between_domains_succeeds(self): domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) root_project = unit.new_project_ref(domain_id=domain1['id']) root_project = self.resource_api.create_project(root_project['id'], root_project) root_project['domain_id'] = domain2['id'] self.resource_api.update_project(root_project['id'], root_project) project_from_db = self.resource_api.get_project(root_project['id']) self.assertEqual(domain2['id'], project_from_db['domain_id']) @unit.skip_if_no_multiple_domains_support def test_update_domain_id_project_is_domain_fails(self): other_domain = unit.new_domain_ref() self.resource_api.create_domain(other_domain['id'], other_domain) project = unit.new_project_ref(is_domain=True) self.resource_api.create_project(project['id'], project) project['domain_id'] = other_domain['id'] # Update of domain_id of projects acting as domains is not allowed self.assertRaises(exception.ValidationError, 
self.resource_api.update_project, project['id'], project) def test_rename_duplicate_project_name_fails(self): project1 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project2 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project1['id'], project1) self.resource_api.create_project(project2['id'], project2) project2['name'] = project1['name'] self.assertRaises(exception.Error, self.resource_api.update_project, project2['id'], project2) def test_update_project_id_does_nothing(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project_id = project['id'] self.resource_api.create_project(project['id'], project) project['id'] = 'fake2' self.resource_api.update_project(project_id, project) project_ref = self.resource_api.get_project(project_id) self.assertEqual(project_id, project_ref['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, 'fake2') def test_delete_domain_with_user_group_project_links(self): # TODO(chungg):add test case once expected behaviour defined pass def test_update_project_returns_not_found(self): self.assertRaises(exception.ProjectNotFound, self.resource_api.update_project, uuid.uuid4().hex, dict()) def test_delete_project_returns_not_found(self): self.assertRaises(exception.ProjectNotFound, self.resource_api.delete_project, uuid.uuid4().hex) def test_create_update_delete_unicode_project(self): unicode_project_name = u'name \u540d\u5b57' project = unit.new_project_ref( name=unicode_project_name, domain_id=CONF.identity.default_domain_id) project = self.resource_api.create_project(project['id'], project) self.resource_api.update_project(project['id'], project) self.resource_api.delete_project(project['id']) def test_create_project_with_no_enabled_field(self): ref = unit.new_project_ref(domain_id=CONF.identity.default_domain_id) del ref['enabled'] self.resource_api.create_project(ref['id'], ref) project = 
self.resource_api.get_project(ref['id']) self.assertIs(project['enabled'], True) def test_create_project_long_name_fails(self): project = unit.new_project_ref( name='a' * 65, domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ValidationError, self.resource_api.create_project, project['id'], project) def test_create_project_blank_name_fails(self): project = unit.new_project_ref( name='', domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ValidationError, self.resource_api.create_project, project['id'], project) def test_create_project_invalid_name_fails(self): project = unit.new_project_ref( name=None, domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ValidationError, self.resource_api.create_project, project['id'], project) project = unit.new_project_ref( name=123, domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ValidationError, self.resource_api.create_project, project['id'], project) def test_update_project_blank_name_fails(self): project = unit.new_project_ref( name='fake1', domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project['id'], project) project['name'] = '' self.assertRaises(exception.ValidationError, self.resource_api.update_project, project['id'], project) def test_update_project_long_name_fails(self): project = unit.new_project_ref( name='fake1', domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project['id'], project) project['name'] = 'a' * 65 self.assertRaises(exception.ValidationError, self.resource_api.update_project, project['id'], project) def test_update_project_invalid_name_fails(self): project = unit.new_project_ref( name='fake1', domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project['id'], project) project['name'] = None self.assertRaises(exception.ValidationError, self.resource_api.update_project, project['id'], project) project['name'] = 123 
self.assertRaises(exception.ValidationError, self.resource_api.update_project, project['id'], project) def test_update_project_invalid_enabled_type_string(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertTrue(project_ref['enabled']) # Strings are not valid boolean values project['enabled'] = "false" self.assertRaises(exception.ValidationError, self.resource_api.update_project, project['id'], project) def test_create_project_invalid_enabled_type_string(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, # invalid string value enabled="true") self.assertRaises(exception.ValidationError, self.resource_api.create_project, project['id'], project) def test_create_project_invalid_domain_id(self): project = unit.new_project_ref(domain_id=uuid.uuid4().hex) self.assertRaises(exception.DomainNotFound, self.resource_api.create_project, project['id'], project) def test_list_domains(self): domain1 = unit.new_domain_ref() domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) self.resource_api.create_domain(domain2['id'], domain2) domains = self.resource_api.list_domains() self.assertEqual(3, len(domains)) domain_ids = [] for domain in domains: domain_ids.append(domain.get('id')) self.assertIn(CONF.identity.default_domain_id, domain_ids) self.assertIn(domain1['id'], domain_ids) self.assertIn(domain2['id'], domain_ids) def test_list_projects(self): project_refs = self.resource_api.list_projects() project_count = len(default_fixtures.TENANTS) + self.domain_count self.assertEqual(project_count, len(project_refs)) for project in default_fixtures.TENANTS: self.assertIn(project, project_refs) def test_list_projects_with_multiple_filters(self): # Create a project project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project = 
self.resource_api.create_project(project['id'], project) # Build driver hints with the project's name and inexistent description hints = driver_hints.Hints() hints.add_filter('name', project['name']) hints.add_filter('description', uuid.uuid4().hex) # Retrieve projects based on hints and check an empty list is returned projects = self.resource_api.list_projects(hints) self.assertEqual([], projects) # Build correct driver hints hints = driver_hints.Hints() hints.add_filter('name', project['name']) hints.add_filter('description', project['description']) # Retrieve projects based on hints projects = self.resource_api.list_projects(hints) # Check that the returned list contains only the first project self.assertEqual(1, len(projects)) self.assertEqual(project, projects[0]) def test_list_projects_for_domain(self): project_ids = ([x['id'] for x in self.resource_api.list_projects_in_domain( CONF.identity.default_domain_id)]) # Only the projects from the default fixtures are expected, since # filtering by domain does not include any project that acts as a # domain. 
self.assertThat( project_ids, matchers.HasLength(len(default_fixtures.TENANTS))) self.assertIn(self.tenant_bar['id'], project_ids) self.assertIn(self.tenant_baz['id'], project_ids) self.assertIn(self.tenant_mtu['id'], project_ids) self.assertIn(self.tenant_service['id'], project_ids) @unit.skip_if_no_multiple_domains_support def test_list_projects_acting_as_domain(self): initial_domains = self.resource_api.list_domains() # Creating 5 projects that act as domains new_projects_acting_as_domains = [] for i in range(5): project = unit.new_project_ref(is_domain=True) project = self.resource_api.create_project(project['id'], project) new_projects_acting_as_domains.append(project) # Creating a few regular project to ensure it doesn't mess with the # ones that act as domains self._create_projects_hierarchy(hierarchy_size=2) projects = self.resource_api.list_projects_acting_as_domain() expected_number_projects = ( len(initial_domains) + len(new_projects_acting_as_domains)) self.assertEqual(expected_number_projects, len(projects)) for project in new_projects_acting_as_domains: self.assertIn(project, projects) for domain in initial_domains: self.assertIn(domain['id'], [p['id'] for p in projects]) @unit.skip_if_no_multiple_domains_support def test_list_projects_for_alternate_domain(self): domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) project1 = unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project2['id'], project2) project_ids = ([x['id'] for x in self.resource_api.list_projects_in_domain( domain1['id'])]) self.assertEqual(2, len(project_ids)) self.assertIn(project1['id'], project_ids) self.assertIn(project2['id'], project_ids) def _create_projects_hierarchy(self, hierarchy_size=2, domain_id=None, is_domain=False, parent_project_id=None): """Creates a project hierarchy with specified size. 
:param hierarchy_size: the desired hierarchy size, default is 2 - a project with one child. :param domain_id: domain where the projects hierarchy will be created. :param is_domain: if the hierarchy will have the is_domain flag active or not. :param parent_project_id: if the intention is to create a sub-hierarchy, sets the sub-hierarchy root. Defaults to creating a new hierarchy, i.e. a new root project. :returns projects: a list of the projects in the created hierarchy. """ if domain_id is None: domain_id = CONF.identity.default_domain_id if parent_project_id: project = unit.new_project_ref(parent_id=parent_project_id, domain_id=domain_id, is_domain=is_domain) else: project = unit.new_project_ref(domain_id=domain_id, is_domain=is_domain) project_id = project['id'] project = self.resource_api.create_project(project_id, project) projects = [project] for i in range(1, hierarchy_size): new_project = unit.new_project_ref(parent_id=project_id, domain_id=domain_id) self.resource_api.create_project(new_project['id'], new_project) projects.append(new_project) project_id = new_project['id'] return projects @unit.skip_if_no_multiple_domains_support def test_create_domain_with_project_api(self): project = unit.new_project_ref(is_domain=True) ref = self.resource_api.create_project(project['id'], project) self.assertTrue(ref['is_domain']) self.resource_api.get_domain(ref['id']) @unit.skip_if_no_multiple_domains_support def test_project_as_a_domain_uniqueness_constraints(self): """Tests project uniqueness for those acting as domains. If it is a project acting as a domain, we can't have two or more with the same name. 
""" # Create two projects acting as a domain project = unit.new_project_ref(is_domain=True) project = self.resource_api.create_project(project['id'], project) project2 = unit.new_project_ref(is_domain=True) project2 = self.resource_api.create_project(project2['id'], project2) # All projects acting as domains have a null domain_id, so should not # be able to create another with the same name but a different # project ID. new_project = project.copy() new_project['id'] = uuid.uuid4().hex self.assertRaises(exception.Conflict, self.resource_api.create_project, new_project['id'], new_project) # We also should not be able to update one to have a name clash project2['name'] = project['name'] self.assertRaises(exception.Conflict, self.resource_api.update_project, project2['id'], project2) # But updating it to a unique name is OK project2['name'] = uuid.uuid4().hex self.resource_api.update_project(project2['id'], project2) # Finally, it should be OK to create a project with same name as one of # these acting as a domain, as long as it is a regular project project3 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, name=project2['name']) self.resource_api.create_project(project3['id'], project3) # In fact, it should be OK to create such a project in the domain which # has the matching name. 
# TODO(henry-nash): Once we fully support projects acting as a domain, # add a test here to create a sub-project with a name that matches its # project acting as a domain @unit.skip_if_no_multiple_domains_support @test_utils.wip('waiting for sub projects acting as domains support') def test_is_domain_sub_project_has_parent_domain_id(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, is_domain=True) self.resource_api.create_project(project['id'], project) sub_project = unit.new_project_ref(domain_id=project['id'], parent_id=project['id'], is_domain=True) ref = self.resource_api.create_project(sub_project['id'], sub_project) self.assertTrue(ref['is_domain']) self.assertEqual(project['id'], ref['parent_id']) self.assertEqual(project['id'], ref['domain_id']) @unit.skip_if_no_multiple_domains_support def test_delete_domain_with_project_api(self): project = unit.new_project_ref(domain_id=None, is_domain=True) self.resource_api.create_project(project['id'], project) # Check that a corresponding domain was created self.resource_api.get_domain(project['id']) # Try to delete the enabled project that acts as a domain self.assertRaises(exception.ForbiddenNotSecurity, self.resource_api.delete_project, project['id']) # Disable the project project['enabled'] = False self.resource_api.update_project(project['id'], project) # Successfully delete the project self.resource_api.delete_project(project['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project['id']) self.assertRaises(exception.DomainNotFound, self.resource_api.get_domain, project['id']) @unit.skip_if_no_multiple_domains_support def test_create_subproject_acting_as_domain_fails(self): root_project = unit.new_project_ref(is_domain=True) self.resource_api.create_project(root_project['id'], root_project) sub_project = unit.new_project_ref(is_domain=True, parent_id=root_project['id']) # Creation of sub projects acting as domains is not allowed yet 
self.assertRaises(exception.ValidationError, self.resource_api.create_project, sub_project['id'], sub_project) @unit.skip_if_no_multiple_domains_support def test_create_domain_under_regular_project_hierarchy_fails(self): # Projects acting as domains can't have a regular project as parent projects_hierarchy = self._create_projects_hierarchy() parent = projects_hierarchy[1] project = unit.new_project_ref(domain_id=parent['id'], parent_id=parent['id'], is_domain=True) self.assertRaises(exception.ValidationError, self.resource_api.create_project, project['id'], project) @unit.skip_if_no_multiple_domains_support @test_utils.wip('waiting for sub projects acting as domains support') def test_create_project_under_domain_hierarchy(self): projects_hierarchy = self._create_projects_hierarchy(is_domain=True) parent = projects_hierarchy[1] project = unit.new_project_ref(domain_id=parent['id'], parent_id=parent['id'], is_domain=False) ref = self.resource_api.create_project(project['id'], project) self.assertFalse(ref['is_domain']) self.assertEqual(parent['id'], ref['parent_id']) self.assertEqual(parent['id'], ref['domain_id']) def test_create_project_without_is_domain_flag(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) del project['is_domain'] ref = self.resource_api.create_project(project['id'], project) # The is_domain flag should be False by default self.assertFalse(ref['is_domain']) @unit.skip_if_no_multiple_domains_support def test_create_project_passing_is_domain_flag_true(self): project = unit.new_project_ref(is_domain=True) ref = self.resource_api.create_project(project['id'], project) self.assertTrue(ref['is_domain']) def test_create_project_passing_is_domain_flag_false(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, is_domain=False) ref = self.resource_api.create_project(project['id'], project) self.assertIs(False, ref['is_domain']) @test_utils.wip('waiting for support for parent_id to imply 
domain_id') def test_create_project_with_parent_id_and_without_domain_id(self): # First create a domain project = unit.new_project_ref(is_domain=True) self.resource_api.create_project(project['id'], project) # Now create a child by just naming the parent_id sub_project = unit.new_project_ref(parent_id=project['id']) ref = self.resource_api.create_project(sub_project['id'], sub_project) # The domain_id should be set to the parent domain_id self.assertEqual(project['domain_id'], ref['domain_id']) def test_create_project_with_domain_id_and_without_parent_id(self): # First create a domain project = unit.new_project_ref(is_domain=True) self.resource_api.create_project(project['id'], project) # Now create a child by just naming the domain_id sub_project = unit.new_project_ref(domain_id=project['id']) ref = self.resource_api.create_project(sub_project['id'], sub_project) # The parent_id and domain_id should be set to the id of the project # acting as a domain self.assertEqual(project['id'], ref['parent_id']) self.assertEqual(project['id'], ref['domain_id']) def test_create_project_with_domain_id_mismatch_to_parent_domain(self): # First create a domain project = unit.new_project_ref(is_domain=True) self.resource_api.create_project(project['id'], project) # Now try to create a child with the above as its parent, but # specifying a different domain. 
sub_project = unit.new_project_ref( parent_id=project['id'], domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ValidationError, self.resource_api.create_project, sub_project['id'], sub_project) def test_check_leaf_projects(self): projects_hierarchy = self._create_projects_hierarchy() root_project = projects_hierarchy[0] leaf_project = projects_hierarchy[1] self.assertFalse(self.resource_api.is_leaf_project( root_project['id'])) self.assertTrue(self.resource_api.is_leaf_project( leaf_project['id'])) # Delete leaf_project self.resource_api.delete_project(leaf_project['id']) # Now, root_project should be leaf self.assertTrue(self.resource_api.is_leaf_project( root_project['id'])) def test_list_projects_in_subtree(self): projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3) project1 = projects_hierarchy[0] project2 = projects_hierarchy[1] project3 = projects_hierarchy[2] project4 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, parent_id=project2['id']) self.resource_api.create_project(project4['id'], project4) subtree = self.resource_api.list_projects_in_subtree(project1['id']) self.assertEqual(3, len(subtree)) self.assertIn(project2, subtree) self.assertIn(project3, subtree) self.assertIn(project4, subtree) subtree = self.resource_api.list_projects_in_subtree(project2['id']) self.assertEqual(2, len(subtree)) self.assertIn(project3, subtree) self.assertIn(project4, subtree) subtree = self.resource_api.list_projects_in_subtree(project3['id']) self.assertEqual(0, len(subtree)) def test_list_projects_in_subtree_with_circular_reference(self): project1 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project1 = self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, parent_id=project1['id']) self.resource_api.create_project(project2['id'], project2) project1['parent_id'] = project2['id'] # Adds cyclic reference # 
NOTE(dstanek): The manager does not allow parent_id to be updated. # Instead will directly use the driver to create the cyclic # reference. self.resource_api.driver.update_project(project1['id'], project1) subtree = self.resource_api.list_projects_in_subtree(project1['id']) # NOTE(dstanek): If a cyclic reference is detected the code bails # and returns None instead of falling into the infinite # recursion trap. self.assertIsNone(subtree) def test_list_projects_in_subtree_invalid_project_id(self): self.assertRaises(exception.ValidationError, self.resource_api.list_projects_in_subtree, None) self.assertRaises(exception.ProjectNotFound, self.resource_api.list_projects_in_subtree, uuid.uuid4().hex) def test_list_project_parents(self): projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3) project1 = projects_hierarchy[0] project2 = projects_hierarchy[1] project3 = projects_hierarchy[2] project4 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, parent_id=project2['id']) self.resource_api.create_project(project4['id'], project4) parents1 = self.resource_api.list_project_parents(project3['id']) self.assertEqual(3, len(parents1)) self.assertIn(project1, parents1) self.assertIn(project2, parents1) parents2 = self.resource_api.list_project_parents(project4['id']) self.assertEqual(parents1, parents2) parents = self.resource_api.list_project_parents(project1['id']) # It has the default domain as parent self.assertEqual(1, len(parents)) def test_update_project_enabled_cascade(self): """Test update_project_cascade Ensures the enabled attribute is correctly updated across a simple 3-level projects hierarchy. 
""" projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3) parent = projects_hierarchy[0] # Disable in parent project disables the whole subtree parent['enabled'] = False # Store the ref from backend in another variable so we don't bother # to remove other attributes that were not originally provided and # were set in the manager, like parent_id and domain_id. parent_ref = self.resource_api.update_project(parent['id'], parent, cascade=True) subtree = self.resource_api.list_projects_in_subtree(parent['id']) self.assertEqual(2, len(subtree)) self.assertFalse(parent_ref['enabled']) self.assertFalse(subtree[0]['enabled']) self.assertFalse(subtree[1]['enabled']) # Enable parent project enables the whole subtree parent['enabled'] = True parent_ref = self.resource_api.update_project(parent['id'], parent, cascade=True) subtree = self.resource_api.list_projects_in_subtree(parent['id']) self.assertEqual(2, len(subtree)) self.assertTrue(parent_ref['enabled']) self.assertTrue(subtree[0]['enabled']) self.assertTrue(subtree[1]['enabled']) def test_cannot_enable_cascade_with_parent_disabled(self): projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3) grandparent = projects_hierarchy[0] parent = projects_hierarchy[1] grandparent['enabled'] = False self.resource_api.update_project(grandparent['id'], grandparent, cascade=True) subtree = self.resource_api.list_projects_in_subtree(parent['id']) self.assertFalse(subtree[0]['enabled']) parent['enabled'] = True self.assertRaises(exception.ForbiddenNotSecurity, self.resource_api.update_project, parent['id'], parent, cascade=True) def test_update_cascade_only_accepts_enabled(self): # Update cascade does not accept any other attribute but 'enabled' new_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(new_project['id'], new_project) new_project['name'] = 'project1' self.assertRaises(exception.ValidationError, self.resource_api.update_project, 
new_project['id'], new_project, cascade=True) def test_list_project_parents_invalid_project_id(self): self.assertRaises(exception.ValidationError, self.resource_api.list_project_parents, None) self.assertRaises(exception.ProjectNotFound, self.resource_api.list_project_parents, uuid.uuid4().hex) def test_create_project_doesnt_modify_passed_in_dict(self): new_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) original_project = new_project.copy() self.resource_api.create_project(new_project['id'], new_project) self.assertDictEqual(original_project, new_project) def test_update_project_enable(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertTrue(project_ref['enabled']) project['enabled'] = False self.resource_api.update_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertEqual(project['enabled'], project_ref['enabled']) # If not present, enabled field should not be updated del project['enabled'] self.resource_api.update_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertFalse(project_ref['enabled']) project['enabled'] = True self.resource_api.update_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertEqual(project['enabled'], project_ref['enabled']) del project['enabled'] self.resource_api.update_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertTrue(project_ref['enabled']) def test_create_invalid_domain_fails(self): new_group = unit.new_group_ref(domain_id="doesnotexist") self.assertRaises(exception.DomainNotFound, self.identity_api.create_group, new_group) new_user = unit.new_user_ref(domain_id="doesnotexist") self.assertRaises(exception.DomainNotFound, self.identity_api.create_user, 
new_user) @unit.skip_if_no_multiple_domains_support def test_project_crud(self): domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) project = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertDictContainsSubset(project, project_ref) project['name'] = uuid.uuid4().hex self.resource_api.update_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertDictContainsSubset(project, project_ref) self.resource_api.delete_project(project['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project['id']) def test_domain_delete_hierarchy(self): domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) # Creating a root and a leaf project inside the domain projects_hierarchy = self._create_projects_hierarchy( domain_id=domain['id']) root_project = projects_hierarchy[0] leaf_project = projects_hierarchy[0] # Disable the domain domain['enabled'] = False self.resource_api.update_domain(domain['id'], domain) # Delete the domain self.resource_api.delete_domain(domain['id']) # Make sure the domain no longer exists self.assertRaises(exception.DomainNotFound, self.resource_api.get_domain, domain['id']) # Make sure the root project no longer exists self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, root_project['id']) # Make sure the leaf project no longer exists self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, leaf_project['id']) def test_delete_projects_from_ids(self): """Tests the resource backend call delete_projects_from_ids. Tests the normal flow of the delete_projects_from_ids backend call, that ensures no project on the list exists after it is succesfully called. 
""" project1_ref = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project2_ref = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) projects = (project1_ref, project2_ref) for project in projects: self.resource_api.create_project(project['id'], project) # Setting up the ID's list projects_ids = [p['id'] for p in projects] self.resource_api.driver.delete_projects_from_ids(projects_ids) # Ensuring projects no longer exist at backend level for project_id in projects_ids: self.assertRaises(exception.ProjectNotFound, self.resource_api.driver.get_project, project_id) # Passing an empty list is silently ignored self.resource_api.driver.delete_projects_from_ids([]) def test_delete_projects_from_ids_with_no_existing_project_id(self): """Tests delete_projects_from_ids issues warning if not found. Tests the resource backend call delete_projects_from_ids passing a non existing ID in project_ids, which is logged and ignored by the backend. """ project_ref = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project_ref['id'], project_ref) # Setting up the ID's list projects_ids = (project_ref['id'], uuid.uuid4().hex) with mock.patch('keystone.resource.backends.sql.LOG') as mock_log: self.resource_api.delete_projects_from_ids(projects_ids) self.assertTrue(mock_log.warning.called) # The existing project was deleted. self.assertRaises(exception.ProjectNotFound, self.resource_api.driver.get_project, project_ref['id']) # Even if we only have one project, and it does not exist, it returns # no error. 
self.resource_api.driver.delete_projects_from_ids([uuid.uuid4().hex]) def test_delete_project_cascade(self): # create a hierarchy with 3 levels projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3) root_project = projects_hierarchy[0] project1 = projects_hierarchy[1] project2 = projects_hierarchy[2] # Disabling all projects before attempting to delete for project in (project2, project1, root_project): project['enabled'] = False self.resource_api.update_project(project['id'], project) self.resource_api.delete_project(root_project['id'], cascade=True) for project in projects_hierarchy: self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project['id']) def test_delete_large_project_cascade(self): """Try delete a large project with cascade true. Tree we will create:: +-p1-+ | | p5 p2 | | p6 +-p3-+ | | p7 p4 """ # create a hierarchy with 4 levels projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=4) p1 = projects_hierarchy[0] # Add the left branch to the hierarchy (p5, p6) self._create_projects_hierarchy(hierarchy_size=2, parent_project_id=p1['id']) # Add p7 to the hierarchy p3_id = projects_hierarchy[2]['id'] self._create_projects_hierarchy(hierarchy_size=1, parent_project_id=p3_id) # Reverse the hierarchy to disable the leaf first prjs_hierarchy = ([p1] + self.resource_api.list_projects_in_subtree( p1['id']))[::-1] # Disabling all projects before attempting to delete for project in prjs_hierarchy: project['enabled'] = False self.resource_api.update_project(project['id'], project) self.resource_api.delete_project(p1['id'], cascade=True) for project in prjs_hierarchy: self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project['id']) def test_cannot_delete_project_cascade_with_enabled_child(self): # create a hierarchy with 3 levels projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3) root_project = projects_hierarchy[0] project1 = projects_hierarchy[1] project2 = 
projects_hierarchy[2] project2['enabled'] = False self.resource_api.update_project(project2['id'], project2) # Cannot cascade delete root_project, since project1 is enabled self.assertRaises(exception.ForbiddenNotSecurity, self.resource_api.delete_project, root_project['id'], cascade=True) # Ensuring no project was deleted, not even project2 self.resource_api.get_project(root_project['id']) self.resource_api.get_project(project1['id']) self.resource_api.get_project(project2['id']) def test_hierarchical_projects_crud(self): # create a hierarchy with just a root project (which is a leaf as well) projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=1) root_project1 = projects_hierarchy[0] # create a hierarchy with one root project and one leaf project projects_hierarchy = self._create_projects_hierarchy() root_project2 = projects_hierarchy[0] leaf_project = projects_hierarchy[1] # update description from leaf_project leaf_project['description'] = 'new description' self.resource_api.update_project(leaf_project['id'], leaf_project) proj_ref = self.resource_api.get_project(leaf_project['id']) self.assertDictEqual(leaf_project, proj_ref) # update the parent_id is not allowed leaf_project['parent_id'] = root_project1['id'] self.assertRaises(exception.ForbiddenNotSecurity, self.resource_api.update_project, leaf_project['id'], leaf_project) # delete root_project1 self.resource_api.delete_project(root_project1['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, root_project1['id']) # delete root_project2 is not allowed since it is not a leaf project self.assertRaises(exception.ForbiddenNotSecurity, self.resource_api.delete_project, root_project2['id']) def test_create_project_with_invalid_parent(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, parent_id='fake') self.assertRaises(exception.ProjectNotFound, self.resource_api.create_project, project['id'], project) 
@unit.skip_if_no_multiple_domains_support def test_create_leaf_project_with_different_domain(self): root_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(root_project['id'], root_project) domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) leaf_project = unit.new_project_ref(domain_id=domain['id'], parent_id=root_project['id']) self.assertRaises(exception.ValidationError, self.resource_api.create_project, leaf_project['id'], leaf_project) def test_delete_hierarchical_leaf_project(self): projects_hierarchy = self._create_projects_hierarchy() root_project = projects_hierarchy[0] leaf_project = projects_hierarchy[1] self.resource_api.delete_project(leaf_project['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, leaf_project['id']) self.resource_api.delete_project(root_project['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, root_project['id']) def test_delete_hierarchical_not_leaf_project(self): projects_hierarchy = self._create_projects_hierarchy() root_project = projects_hierarchy[0] self.assertRaises(exception.ForbiddenNotSecurity, self.resource_api.delete_project, root_project['id']) def test_update_project_parent(self): projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3) project1 = projects_hierarchy[0] project2 = projects_hierarchy[1] project3 = projects_hierarchy[2] # project2 is the parent from project3 self.assertEqual(project3.get('parent_id'), project2['id']) # try to update project3 parent to parent1 project3['parent_id'] = project1['id'] self.assertRaises(exception.ForbiddenNotSecurity, self.resource_api.update_project, project3['id'], project3) def test_create_project_under_disabled_one(self): project1 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, enabled=False) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref( 
domain_id=CONF.identity.default_domain_id, parent_id=project1['id']) # It's not possible to create a project under a disabled one in the # hierarchy self.assertRaises(exception.ValidationError, self.resource_api.create_project, project2['id'], project2) def test_disable_hierarchical_leaf_project(self): projects_hierarchy = self._create_projects_hierarchy() leaf_project = projects_hierarchy[1] leaf_project['enabled'] = False self.resource_api.update_project(leaf_project['id'], leaf_project) project_ref = self.resource_api.get_project(leaf_project['id']) self.assertEqual(leaf_project['enabled'], project_ref['enabled']) def test_disable_hierarchical_not_leaf_project(self): projects_hierarchy = self._create_projects_hierarchy() root_project = projects_hierarchy[0] root_project['enabled'] = False self.assertRaises(exception.ForbiddenNotSecurity, self.resource_api.update_project, root_project['id'], root_project) def test_enable_project_with_disabled_parent(self): projects_hierarchy = self._create_projects_hierarchy() root_project = projects_hierarchy[0] leaf_project = projects_hierarchy[1] # Disable leaf and root leaf_project['enabled'] = False self.resource_api.update_project(leaf_project['id'], leaf_project) root_project['enabled'] = False self.resource_api.update_project(root_project['id'], root_project) # Try to enable the leaf project, it's not possible since it has # a disabled parent leaf_project['enabled'] = True self.assertRaises(exception.ForbiddenNotSecurity, self.resource_api.update_project, leaf_project['id'], leaf_project) def _get_hierarchy_depth(self, project_id): return len(self.resource_api.list_project_parents(project_id)) + 1 def test_check_hierarchy_depth(self): # Should be allowed to have a hierarchy of the max depth specified # in the config option plus one (to allow for the additional project # acting as a domain after an upgrade) projects_hierarchy = self._create_projects_hierarchy( CONF.max_project_tree_depth) leaf_project = 
projects_hierarchy[CONF.max_project_tree_depth - 1] depth = self._get_hierarchy_depth(leaf_project['id']) self.assertEqual(CONF.max_project_tree_depth + 1, depth) # Creating another project in the hierarchy shouldn't be allowed project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, parent_id=leaf_project['id']) self.assertRaises(exception.ForbiddenNotSecurity, self.resource_api.create_project, project['id'], project) def test_project_update_missing_attrs_with_a_value(self): # Creating a project with no description attribute. project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) del project['description'] project = self.resource_api.create_project(project['id'], project) # Add a description attribute. project['description'] = uuid.uuid4().hex self.resource_api.update_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertDictEqual(project, project_ref) def test_project_update_missing_attrs_with_a_falsey_value(self): # Creating a project with no description attribute. project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) del project['description'] project = self.resource_api.create_project(project['id'], project) # Add a description attribute. 
project['description'] = '' self.resource_api.update_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertDictEqual(project, project_ref) def test_domain_crud(self): domain = unit.new_domain_ref() domain_ref = self.resource_api.create_domain(domain['id'], domain) self.assertDictEqual(domain, domain_ref) domain_ref = self.resource_api.get_domain(domain['id']) self.assertDictEqual(domain, domain_ref) domain['name'] = uuid.uuid4().hex domain_ref = self.resource_api.update_domain(domain['id'], domain) self.assertDictEqual(domain, domain_ref) domain_ref = self.resource_api.get_domain(domain['id']) self.assertDictEqual(domain, domain_ref) # Ensure an 'enabled' domain cannot be deleted self.assertRaises(exception.ForbiddenNotSecurity, self.resource_api.delete_domain, domain_id=domain['id']) # Disable the domain domain['enabled'] = False self.resource_api.update_domain(domain['id'], domain) # Delete the domain self.resource_api.delete_domain(domain['id']) # Make sure the domain no longer exists self.assertRaises(exception.DomainNotFound, self.resource_api.get_domain, domain['id']) @unit.skip_if_no_multiple_domains_support def test_domain_name_case_sensitivity(self): # create a ref with a lowercase name domain_name = 'test_domain' ref = unit.new_domain_ref(name=domain_name) lower_case_domain = self.resource_api.create_domain(ref['id'], ref) # assign a new ID to the ref with the same name, but in uppercase ref['id'] = uuid.uuid4().hex ref['name'] = domain_name.upper() upper_case_domain = self.resource_api.create_domain(ref['id'], ref) # We can get each domain by name lower_case_domain_ref = self.resource_api.get_domain_by_name( domain_name) self.assertDictEqual(lower_case_domain, lower_case_domain_ref) upper_case_domain_ref = self.resource_api.get_domain_by_name( domain_name.upper()) self.assertDictEqual(upper_case_domain, upper_case_domain_ref) def test_project_attribute_update(self): project = unit.new_project_ref( 
domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project['id'], project) # pick a key known to be non-existent key = 'description' def assert_key_equals(value): project_ref = self.resource_api.update_project( project['id'], project) self.assertEqual(value, project_ref[key]) project_ref = self.resource_api.get_project(project['id']) self.assertEqual(value, project_ref[key]) def assert_get_key_is(value): project_ref = self.resource_api.update_project( project['id'], project) self.assertIs(project_ref.get(key), value) project_ref = self.resource_api.get_project(project['id']) self.assertIs(project_ref.get(key), value) # add an attribute that doesn't exist, set it to a falsey value value = '' project[key] = value assert_key_equals(value) # set an attribute with a falsey value to null value = None project[key] = value assert_get_key_is(value) # do it again, in case updating from this situation is handled oddly value = None project[key] = value assert_get_key_is(value) # set a possibly-null value to a falsey value value = '' project[key] = value assert_key_equals(value) # set a falsey value to a truthy value value = uuid.uuid4().hex project[key] = value assert_key_equals(value) @unit.skip_if_cache_disabled('resource') @unit.skip_if_no_multiple_domains_support def test_domain_rename_invalidates_get_domain_by_name_cache(self): domain = unit.new_domain_ref() domain_id = domain['id'] domain_name = domain['name'] self.resource_api.create_domain(domain_id, domain) domain_ref = self.resource_api.get_domain_by_name(domain_name) domain_ref['name'] = uuid.uuid4().hex self.resource_api.update_domain(domain_id, domain_ref) self.assertRaises(exception.DomainNotFound, self.resource_api.get_domain_by_name, domain_name) @unit.skip_if_cache_disabled('resource') def test_cache_layer_domain_crud(self): domain = unit.new_domain_ref() domain_id = domain['id'] # Create Domain self.resource_api.create_domain(domain_id, domain) project_domain_ref = 
self.resource_api.get_project(domain_id) domain_ref = self.resource_api.get_domain(domain_id) updated_project_domain_ref = copy.deepcopy(project_domain_ref) updated_project_domain_ref['name'] = uuid.uuid4().hex updated_domain_ref = copy.deepcopy(domain_ref) updated_domain_ref['name'] = updated_project_domain_ref['name'] # Update domain, bypassing resource api manager self.resource_api.driver.update_project(domain_id, updated_project_domain_ref) # Verify get_domain still returns the domain self.assertDictContainsSubset( domain_ref, self.resource_api.get_domain(domain_id)) # Invalidate cache self.resource_api.get_domain.invalidate(self.resource_api, domain_id) # Verify get_domain returns the updated domain self.assertDictContainsSubset( updated_domain_ref, self.resource_api.get_domain(domain_id)) # Update the domain back to original ref, using the assignment api # manager self.resource_api.update_domain(domain_id, domain_ref) self.assertDictContainsSubset( domain_ref, self.resource_api.get_domain(domain_id)) # Make sure domain is 'disabled', bypass resource api manager project_domain_ref_disabled = project_domain_ref.copy() project_domain_ref_disabled['enabled'] = False self.resource_api.driver.update_project(domain_id, project_domain_ref_disabled) self.resource_api.driver.update_project(domain_id, {'enabled': False}) # Delete domain, bypassing resource api manager self.resource_api.driver.delete_project(domain_id) # Verify get_domain still returns the domain self.assertDictContainsSubset( domain_ref, self.resource_api.get_domain(domain_id)) # Invalidate cache self.resource_api.get_domain.invalidate(self.resource_api, domain_id) # Verify get_domain now raises DomainNotFound self.assertRaises(exception.DomainNotFound, self.resource_api.get_domain, domain_id) # Recreate Domain self.resource_api.create_domain(domain_id, domain) self.resource_api.get_domain(domain_id) # Make sure domain is 'disabled', bypass resource api manager domain['enabled'] = False 
self.resource_api.driver.update_project(domain_id, domain) self.resource_api.driver.update_project(domain_id, {'enabled': False}) # Delete domain self.resource_api.delete_domain(domain_id) # verify DomainNotFound raised self.assertRaises(exception.DomainNotFound, self.resource_api.get_domain, domain_id) @unit.skip_if_cache_disabled('resource') @unit.skip_if_no_multiple_domains_support def test_project_rename_invalidates_get_project_by_name_cache(self): domain = unit.new_domain_ref() project = unit.new_project_ref(domain_id=domain['id']) project_id = project['id'] project_name = project['name'] self.resource_api.create_domain(domain['id'], domain) # Create a project self.resource_api.create_project(project_id, project) self.resource_api.get_project_by_name(project_name, domain['id']) project['name'] = uuid.uuid4().hex self.resource_api.update_project(project_id, project) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project_by_name, project_name, domain['id']) @unit.skip_if_cache_disabled('resource') @unit.skip_if_no_multiple_domains_support def test_cache_layer_project_crud(self): domain = unit.new_domain_ref() project = unit.new_project_ref(domain_id=domain['id']) project_id = project['id'] self.resource_api.create_domain(domain['id'], domain) # Create a project self.resource_api.create_project(project_id, project) self.resource_api.get_project(project_id) updated_project = copy.deepcopy(project) updated_project['name'] = uuid.uuid4().hex # Update project, bypassing resource manager self.resource_api.driver.update_project(project_id, updated_project) # Verify get_project still returns the original project_ref self.assertDictContainsSubset( project, self.resource_api.get_project(project_id)) # Invalidate cache self.resource_api.get_project.invalidate(self.resource_api, project_id) # Verify get_project now returns the new project self.assertDictContainsSubset( updated_project, self.resource_api.get_project(project_id)) # Update project using the 
resource_api manager back to original self.resource_api.update_project(project['id'], project) # Verify get_project returns the original project_ref self.assertDictContainsSubset( project, self.resource_api.get_project(project_id)) # Delete project bypassing resource self.resource_api.driver.delete_project(project_id) # Verify get_project still returns the project_ref self.assertDictContainsSubset( project, self.resource_api.get_project(project_id)) # Invalidate cache self.resource_api.get_project.invalidate(self.resource_api, project_id) # Verify ProjectNotFound now raised self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project_id) # recreate project self.resource_api.create_project(project_id, project) self.resource_api.get_project(project_id) # delete project self.resource_api.delete_project(project_id) # Verify ProjectNotFound is raised self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project_id) @unit.skip_if_no_multiple_domains_support def test_get_default_domain_by_name(self): domain_name = 'default' domain = unit.new_domain_ref(name=domain_name) self.resource_api.create_domain(domain['id'], domain) domain_ref = self.resource_api.get_domain_by_name(domain_name) self.assertEqual(domain, domain_ref) def test_get_not_default_domain_by_name(self): domain_name = 'foo' self.assertRaises(exception.DomainNotFound, self.resource_api.get_domain_by_name, domain_name) def test_project_update_and_project_get_return_same_response(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project['id'], project) updated_project = {'enabled': False} updated_project_ref = self.resource_api.update_project( project['id'], updated_project) # SQL backend adds 'extra' field updated_project_ref.pop('extra', None) self.assertIs(False, updated_project_ref['enabled']) project_ref = self.resource_api.get_project(project['id']) self.assertDictEqual(updated_project_ref, 
project_ref) class ResourceDriverTests(object): """Tests for the resource driver. Subclasses must set self.driver to the driver instance. """ def test_create_project(self): project_id = uuid.uuid4().hex project = { 'name': uuid.uuid4().hex, 'id': project_id, 'domain_id': uuid.uuid4().hex, } self.driver.create_project(project_id, project) def test_create_project_all_defined_properties(self): project_id = uuid.uuid4().hex project = { 'name': uuid.uuid4().hex, 'id': project_id, 'domain_id': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True, 'parent_id': uuid.uuid4().hex, 'is_domain': True, } self.driver.create_project(project_id, project) def test_create_project_null_domain(self): project_id = uuid.uuid4().hex project = { 'name': uuid.uuid4().hex, 'id': project_id, 'domain_id': None, } self.driver.create_project(project_id, project) def test_create_project_same_name_same_domain_conflict(self): name = uuid.uuid4().hex domain_id = uuid.uuid4().hex project_id = uuid.uuid4().hex project = { 'name': name, 'id': project_id, 'domain_id': domain_id, } self.driver.create_project(project_id, project) project_id = uuid.uuid4().hex project = { 'name': name, 'id': project_id, 'domain_id': domain_id, } self.assertRaises(exception.Conflict, self.driver.create_project, project_id, project) def test_create_project_same_id_conflict(self): project_id = uuid.uuid4().hex project = { 'name': uuid.uuid4().hex, 'id': project_id, 'domain_id': uuid.uuid4().hex, } self.driver.create_project(project_id, project) project = { 'name': uuid.uuid4().hex, 'id': project_id, 'domain_id': uuid.uuid4().hex, } self.assertRaises(exception.Conflict, self.driver.create_project, project_id, project) keystone-9.0.0/keystone/tests/unit/resource/config_backends/0000775000567000056710000000000012701407246025435 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/resource/config_backends/test_sql.py0000664000567000056710000000401412701407102027633 0ustar 
jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import sql from keystone.resource.config_backends import sql as config_sql from keystone.tests import unit from keystone.tests.unit.backend import core_sql from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.resource import test_core class SqlDomainConfigModels(core_sql.BaseBackendSqlModels): def test_whitelisted_model(self): cols = (('domain_id', sql.String, 64), ('group', sql.String, 255), ('option', sql.String, 255), ('value', sql.JsonBlob, None)) self.assertExpectedSchema('whitelisted_config', cols) def test_sensitive_model(self): cols = (('domain_id', sql.String, 64), ('group', sql.String, 255), ('option', sql.String, 255), ('value', sql.JsonBlob, None)) self.assertExpectedSchema('sensitive_config', cols) class SqlDomainConfigDriver(unit.BaseTestCase, test_core.DomainConfigDriverTests): def setUp(self): super(SqlDomainConfigDriver, self).setUp() self.useFixture(database.Database()) self.driver = config_sql.DomainConfig() class SqlDomainConfig(core_sql.BaseBackendSqlTests, test_core.DomainConfigTests): def setUp(self): super(SqlDomainConfig, self).setUp() # test_core.DomainConfigTests is effectively a mixin class, so make # sure we call its setup test_core.DomainConfigTests.setUp(self) keystone-9.0.0/keystone/tests/unit/resource/config_backends/__init__.py0000664000567000056710000000000012701407102027523 0ustar 
jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/test_v3_protection.py0000664000567000056710000022626512701407102024722 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import cfg from oslo_serialization import jsonutils from six.moves import http_client from keystone import exception from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile from keystone.tests.unit import test_v3 from keystone.tests.unit import utils CONF = cfg.CONF class IdentityTestProtectedCase(test_v3.RestfulTestCase): """Test policy enforcement on the v3 Identity API.""" def _policy_fixture(self): return ksfixtures.Policy(self.tmpfilename, self.config_fixture) def setUp(self): """Setup for Identity Protection Test Cases. As well as the usual housekeeping, create a set of domains, users, roles and projects for the subsequent tests: - Three domains: A,B & C. C is disabled. - DomainA has user1, DomainB has user2 and user3 - DomainA has group1 and group2, DomainB has group3 - User1 has two roles on DomainA - User2 has one role on DomainA Remember that there will also be a fourth domain in existence, the default domain. 
""" self.tempfile = self.useFixture(temporaryfile.SecureTempFile()) self.tmpfilename = self.tempfile.file_name super(IdentityTestProtectedCase, self).setUp() # A default auth request we can use - un-scoped user token self.auth = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password']) def load_sample_data(self): self._populate_default_domain() # Start by creating a couple of domains self.domainA = unit.new_domain_ref() self.resource_api.create_domain(self.domainA['id'], self.domainA) self.domainB = unit.new_domain_ref() self.resource_api.create_domain(self.domainB['id'], self.domainB) self.domainC = unit.new_domain_ref(enabled=False) self.resource_api.create_domain(self.domainC['id'], self.domainC) # Now create some users, one in domainA and two of them in domainB self.user1 = unit.create_user(self.identity_api, domain_id=self.domainA['id']) self.user2 = unit.create_user(self.identity_api, domain_id=self.domainB['id']) self.user3 = unit.create_user(self.identity_api, domain_id=self.domainB['id']) self.group1 = unit.new_group_ref(domain_id=self.domainA['id']) self.group1 = self.identity_api.create_group(self.group1) self.group2 = unit.new_group_ref(domain_id=self.domainA['id']) self.group2 = self.identity_api.create_group(self.group2) self.group3 = unit.new_group_ref(domain_id=self.domainB['id']) self.group3 = self.identity_api.create_group(self.group3) self.role = unit.new_role_ref() self.role_api.create_role(self.role['id'], self.role) self.role1 = unit.new_role_ref() self.role_api.create_role(self.role1['id'], self.role1) self.assignment_api.create_grant(self.role['id'], user_id=self.user1['id'], domain_id=self.domainA['id']) self.assignment_api.create_grant(self.role['id'], user_id=self.user2['id'], domain_id=self.domainA['id']) self.assignment_api.create_grant(self.role1['id'], user_id=self.user1['id'], domain_id=self.domainA['id']) def _get_id_list_from_ref_list(self, ref_list): result_list = [] for x in ref_list: 
result_list.append(x['id']) return result_list def _set_policy(self, new_policy): with open(self.tmpfilename, "w") as policyfile: policyfile.write(jsonutils.dumps(new_policy)) def test_list_users_unprotected(self): """GET /users (unprotected) Test Plan: - Update policy so api is unprotected - Use an un-scoped token to make sure we can get back all the users independent of domain """ self._set_policy({"identity:list_users": []}) r = self.get('/users', auth=self.auth) id_list = self._get_id_list_from_ref_list(r.result.get('users')) self.assertIn(self.user1['id'], id_list) self.assertIn(self.user2['id'], id_list) self.assertIn(self.user3['id'], id_list) def test_list_users_filtered_by_domain(self): """GET /users?domain_id=mydomain (filtered) Test Plan: - Update policy so api is unprotected - Use an un-scoped token to make sure we can filter the users by domainB, getting back the 2 users in that domain """ self._set_policy({"identity:list_users": []}) url_by_name = '/users?domain_id=%s' % self.domainB['id'] r = self.get(url_by_name, auth=self.auth) # We should get back two users, those in DomainB id_list = self._get_id_list_from_ref_list(r.result.get('users')) self.assertIn(self.user2['id'], id_list) self.assertIn(self.user3['id'], id_list) def test_get_user_protected_match_id(self): """GET /users/{id} (match payload) Test Plan: - Update policy to protect api by user_id - List users with user_id of user1 as filter, to check that this will correctly match user_id in the flattened payload """ # TODO(henry-nash, ayoung): It would be good to expand this # test for further test flattening, e.g. 
protect on, say, an # attribute of an object being created new_policy = {"identity:get_user": [["user_id:%(user_id)s"]]} self._set_policy(new_policy) url_by_name = '/users/%s' % self.user1['id'] r = self.get(url_by_name, auth=self.auth) self.assertEqual(self.user1['id'], r.result['user']['id']) def test_get_user_protected_match_target(self): """GET /users/{id} (match target) Test Plan: - Update policy to protect api by domain_id - Try and read a user who is in DomainB with a token scoped to Domain A - this should fail - Retry this for a user who is in Domain A, which should succeed. - Finally, try getting a user that does not exist, which should still return UserNotFound """ new_policy = {'identity:get_user': [["domain_id:%(target.user.domain_id)s"]]} self._set_policy(new_policy) self.auth = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], domain_id=self.domainA['id']) url_by_name = '/users/%s' % self.user2['id'] r = self.get(url_by_name, auth=self.auth, expected_status=exception.ForbiddenAction.code) url_by_name = '/users/%s' % self.user1['id'] r = self.get(url_by_name, auth=self.auth) self.assertEqual(self.user1['id'], r.result['user']['id']) url_by_name = '/users/%s' % uuid.uuid4().hex r = self.get(url_by_name, auth=self.auth, expected_status=exception.UserNotFound.code) def test_revoke_grant_protected_match_target(self): """DELETE /domains/{id}/users/{id}/roles/{id} (match target) Test Plan: - Update policy to protect api by domain_id of entities in the grant - Try and delete the existing grant that has a user who is from a different domain - this should fail. - Retry this for a user who is in Domain A, which should succeed. 
""" new_policy = {'identity:revoke_grant': [["domain_id:%(target.user.domain_id)s"]]} self._set_policy(new_policy) collection_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': self.domainA['id'], 'user_id': self.user2['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role['id']} self.auth = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], domain_id=self.domainA['id']) self.delete(member_url, auth=self.auth, expected_status=exception.ForbiddenAction.code) collection_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': self.domainA['id'], 'user_id': self.user1['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role1['id']} self.delete(member_url, auth=self.auth) def test_list_users_protected_by_domain(self): """GET /users?domain_id=mydomain (protected) Test Plan: - Update policy to protect api by domain_id - List groups using a token scoped to domainA with a filter specifying domainA - we should only get back the one user that is in domainA. 
- Try and read the users from domainB - this should fail since we don't have a token scoped for domainB """ new_policy = {"identity:list_users": ["domain_id:%(domain_id)s"]} self._set_policy(new_policy) self.auth = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], domain_id=self.domainA['id']) url_by_name = '/users?domain_id=%s' % self.domainA['id'] r = self.get(url_by_name, auth=self.auth) # We should only get back one user, the one in DomainA id_list = self._get_id_list_from_ref_list(r.result.get('users')) self.assertEqual(1, len(id_list)) self.assertIn(self.user1['id'], id_list) # Now try for domainB, which should fail url_by_name = '/users?domain_id=%s' % self.domainB['id'] r = self.get(url_by_name, auth=self.auth, expected_status=exception.ForbiddenAction.code) def test_list_groups_protected_by_domain(self): """GET /groups?domain_id=mydomain (protected) Test Plan: - Update policy to protect api by domain_id - List groups using a token scoped to domainA and make sure we only get back the two groups that are in domainA - Try and read the groups from domainB - this should fail since we don't have a token scoped for domainB """ new_policy = {"identity:list_groups": ["domain_id:%(domain_id)s"]} self._set_policy(new_policy) self.auth = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], domain_id=self.domainA['id']) url_by_name = '/groups?domain_id=%s' % self.domainA['id'] r = self.get(url_by_name, auth=self.auth) # We should only get back two groups, the ones in DomainA id_list = self._get_id_list_from_ref_list(r.result.get('groups')) self.assertEqual(2, len(id_list)) self.assertIn(self.group1['id'], id_list) self.assertIn(self.group2['id'], id_list) # Now try for domainB, which should fail url_by_name = '/groups?domain_id=%s' % self.domainB['id'] r = self.get(url_by_name, auth=self.auth, expected_status=exception.ForbiddenAction.code) def 
test_list_groups_protected_by_domain_and_filtered(self): """GET /groups?domain_id=mydomain&name=myname (protected) Test Plan: - Update policy to protect api by domain_id - List groups using a token scoped to domainA with a filter specifying both domainA and the name of group. - We should only get back the group in domainA that matches the name """ new_policy = {"identity:list_groups": ["domain_id:%(domain_id)s"]} self._set_policy(new_policy) self.auth = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], domain_id=self.domainA['id']) url_by_name = '/groups?domain_id=%s&name=%s' % ( self.domainA['id'], self.group2['name']) r = self.get(url_by_name, auth=self.auth) # We should only get back one user, the one in DomainA that matches # the name supplied id_list = self._get_id_list_from_ref_list(r.result.get('groups')) self.assertEqual(1, len(id_list)) self.assertIn(self.group2['id'], id_list) class IdentityTestPolicySample(test_v3.RestfulTestCase): """Test policy enforcement of the policy.json file.""" def load_sample_data(self): self._populate_default_domain() self.just_a_user = unit.create_user( self.identity_api, domain_id=CONF.identity.default_domain_id) self.another_user = unit.create_user( self.identity_api, domain_id=CONF.identity.default_domain_id) self.admin_user = unit.create_user( self.identity_api, domain_id=CONF.identity.default_domain_id) self.role = unit.new_role_ref() self.role_api.create_role(self.role['id'], self.role) self.admin_role = unit.new_role_ref(name='admin') self.role_api.create_role(self.admin_role['id'], self.admin_role) # Create and assign roles to the project self.project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(self.project['id'], self.project) self.assignment_api.create_grant(self.role['id'], user_id=self.just_a_user['id'], project_id=self.project['id']) self.assignment_api.create_grant(self.role['id'], user_id=self.another_user['id'], 
project_id=self.project['id']) self.assignment_api.create_grant(self.admin_role['id'], user_id=self.admin_user['id'], project_id=self.project['id']) def test_user_validate_same_token(self): # Given a non-admin user token, the token can be used to validate # itself. # This is GET /v3/auth/tokens, with X-Auth-Token == X-Subject-Token auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token = self.get_requested_token(auth) self.get('/auth/tokens', token=token, headers={'X-Subject-Token': token}) def test_user_validate_user_token(self): # A user can validate one of their own tokens. # This is GET /v3/auth/tokens auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token1 = self.get_requested_token(auth) token2 = self.get_requested_token(auth) self.get('/auth/tokens', token=token1, headers={'X-Subject-Token': token2}) def test_user_validate_other_user_token_rejected(self): # A user cannot validate another user's token. # This is GET /v3/auth/tokens user1_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user1_token = self.get_requested_token(user1_auth) user2_auth = self.build_authentication_request( user_id=self.another_user['id'], password=self.another_user['password']) user2_token = self.get_requested_token(user2_auth) self.get('/auth/tokens', token=user1_token, headers={'X-Subject-Token': user2_token}, expected_status=http_client.FORBIDDEN) def test_admin_validate_user_token(self): # An admin can validate a user's token. 
# This is GET /v3/auth/tokens admin_auth = self.build_authentication_request( user_id=self.admin_user['id'], password=self.admin_user['password'], project_id=self.project['id']) admin_token = self.get_requested_token(admin_auth) user_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user_token = self.get_requested_token(user_auth) self.get('/auth/tokens', token=admin_token, headers={'X-Subject-Token': user_token}) def test_user_check_same_token(self): # Given a non-admin user token, the token can be used to check # itself. # This is HEAD /v3/auth/tokens, with X-Auth-Token == X-Subject-Token auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token = self.get_requested_token(auth) self.head('/auth/tokens', token=token, headers={'X-Subject-Token': token}, expected_status=http_client.OK) def test_user_check_user_token(self): # A user can check one of their own tokens. # This is HEAD /v3/auth/tokens auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token1 = self.get_requested_token(auth) token2 = self.get_requested_token(auth) self.head('/auth/tokens', token=token1, headers={'X-Subject-Token': token2}, expected_status=http_client.OK) def test_user_check_other_user_token_rejected(self): # A user cannot check another user's token. 
# This is HEAD /v3/auth/tokens user1_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user1_token = self.get_requested_token(user1_auth) user2_auth = self.build_authentication_request( user_id=self.another_user['id'], password=self.another_user['password']) user2_token = self.get_requested_token(user2_auth) self.head('/auth/tokens', token=user1_token, headers={'X-Subject-Token': user2_token}, expected_status=http_client.FORBIDDEN) def test_admin_check_user_token(self): # An admin can check a user's token. # This is HEAD /v3/auth/tokens admin_auth = self.build_authentication_request( user_id=self.admin_user['id'], password=self.admin_user['password'], project_id=self.project['id']) admin_token = self.get_requested_token(admin_auth) user_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user_token = self.get_requested_token(user_auth) self.head('/auth/tokens', token=admin_token, headers={'X-Subject-Token': user_token}, expected_status=http_client.OK) def test_user_revoke_same_token(self): # Given a non-admin user token, the token can be used to revoke # itself. # This is DELETE /v3/auth/tokens, with X-Auth-Token == X-Subject-Token auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token = self.get_requested_token(auth) self.delete('/auth/tokens', token=token, headers={'X-Subject-Token': token}) def test_user_revoke_user_token(self): # A user can revoke one of their own tokens. # This is DELETE /v3/auth/tokens auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token1 = self.get_requested_token(auth) token2 = self.get_requested_token(auth) self.delete('/auth/tokens', token=token1, headers={'X-Subject-Token': token2}) def test_user_revoke_other_user_token_rejected(self): # A user cannot revoke another user's token. 
# This is DELETE /v3/auth/tokens user1_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user1_token = self.get_requested_token(user1_auth) user2_auth = self.build_authentication_request( user_id=self.another_user['id'], password=self.another_user['password']) user2_token = self.get_requested_token(user2_auth) self.delete('/auth/tokens', token=user1_token, headers={'X-Subject-Token': user2_token}, expected_status=http_client.FORBIDDEN) def test_admin_revoke_user_token(self): # An admin can revoke a user's token. # This is DELETE /v3/auth/tokens admin_auth = self.build_authentication_request( user_id=self.admin_user['id'], password=self.admin_user['password'], project_id=self.project['id']) admin_token = self.get_requested_token(admin_auth) user_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user_token = self.get_requested_token(user_auth) self.delete('/auth/tokens', token=admin_token, headers={'X-Subject-Token': user_token}) class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin): """Test policy enforcement of the sample v3 cloud policy file.""" def _policy_fixture(self): return ksfixtures.Policy(unit.dirs.etc('policy.v3cloudsample.json'), self.config_fixture) def setUp(self): """Setup for v3 Cloud Policy Sample Test Cases. The following data is created: - Three domains: domainA, domainB and admin_domain - One project, which name is 'project' - domainA has three users: domain_admin_user, project_admin_user and just_a_user: - domain_admin_user has role 'admin' on domainA, - project_admin_user has role 'admin' on the project, - just_a_user has a non-admin role on both domainA and the project. - admin_domain has admin_project, and user cloud_admin_user, with an 'admin' role on admin_project. 
We test various api protection rules from the cloud sample policy file to make sure the sample is valid and that we correctly enforce it. """ # Ensure that test_v3.RestfulTestCase doesn't load its own # sample data, which would make checking the results of our # tests harder super(IdentityTestv3CloudPolicySample, self).setUp() self.config_fixture.config( group='resource', admin_project_name=self.admin_project['name']) self.config_fixture.config( group='resource', admin_project_domain_name=self.admin_domain['name']) def load_sample_data(self): # Start by creating a couple of domains self._populate_default_domain() self.domainA = unit.new_domain_ref() self.resource_api.create_domain(self.domainA['id'], self.domainA) self.domainB = unit.new_domain_ref() self.resource_api.create_domain(self.domainB['id'], self.domainB) self.admin_domain = unit.new_domain_ref() self.resource_api.create_domain(self.admin_domain['id'], self.admin_domain) self.admin_project = unit.new_project_ref( domain_id=self.admin_domain['id']) self.resource_api.create_project(self.admin_project['id'], self.admin_project) # And our users self.cloud_admin_user = unit.create_user( self.identity_api, domain_id=self.admin_domain['id']) self.just_a_user = unit.create_user( self.identity_api, domain_id=self.domainA['id']) self.domain_admin_user = unit.create_user( self.identity_api, domain_id=self.domainA['id']) self.domainB_admin_user = unit.create_user( self.identity_api, domain_id=self.domainB['id']) self.project_admin_user = unit.create_user( self.identity_api, domain_id=self.domainA['id']) self.project_adminB_user = unit.create_user( self.identity_api, domain_id=self.domainB['id']) # The admin role, a domain specific role and another plain role self.admin_role = unit.new_role_ref(name='admin') self.role_api.create_role(self.admin_role['id'], self.admin_role) self.roleA = unit.new_role_ref(domain_id=self.domainA['id']) self.role_api.create_role(self.roleA['id'], self.roleA) self.role = 
unit.new_role_ref() self.role_api.create_role(self.role['id'], self.role) # The cloud admin just gets the admin role on the special admin project self.assignment_api.create_grant(self.admin_role['id'], user_id=self.cloud_admin_user['id'], project_id=self.admin_project['id']) # Assign roles to the domain self.assignment_api.create_grant(self.admin_role['id'], user_id=self.domain_admin_user['id'], domain_id=self.domainA['id']) self.assignment_api.create_grant(self.role['id'], user_id=self.just_a_user['id'], domain_id=self.domainA['id']) self.assignment_api.create_grant(self.admin_role['id'], user_id=self.domainB_admin_user['id'], domain_id=self.domainB['id']) # Create and assign roles to the project self.project = unit.new_project_ref(domain_id=self.domainA['id']) self.resource_api.create_project(self.project['id'], self.project) self.projectB = unit.new_project_ref(domain_id=self.domainB['id']) self.resource_api.create_project(self.projectB['id'], self.projectB) self.assignment_api.create_grant(self.admin_role['id'], user_id=self.project_admin_user['id'], project_id=self.project['id']) self.assignment_api.create_grant( self.admin_role['id'], user_id=self.project_adminB_user['id'], project_id=self.projectB['id']) self.assignment_api.create_grant(self.role['id'], user_id=self.just_a_user['id'], project_id=self.project['id']) def _stati(self, expected_status): # Return the expected return codes for APIs with and without data # with any specified status overriding the normal values if expected_status is None: return (http_client.OK, http_client.CREATED, http_client.NO_CONTENT) else: return (expected_status, expected_status, expected_status) def _test_user_management(self, domain_id, expected=None): status_OK, status_created, status_no_data = self._stati(expected) entity_url = '/users/%s' % self.just_a_user['id'] list_url = '/users?domain_id=%s' % domain_id self.get(entity_url, auth=self.auth, expected_status=status_OK) self.get(list_url, auth=self.auth, 
expected_status=status_OK) user = {'description': 'Updated'} self.patch(entity_url, auth=self.auth, body={'user': user}, expected_status=status_OK) self.delete(entity_url, auth=self.auth, expected_status=status_no_data) user_ref = unit.new_user_ref(domain_id=domain_id) self.post('/users', auth=self.auth, body={'user': user_ref}, expected_status=status_created) def _test_project_management(self, domain_id, expected=None): status_OK, status_created, status_no_data = self._stati(expected) entity_url = '/projects/%s' % self.project['id'] list_url = '/projects?domain_id=%s' % domain_id self.get(entity_url, auth=self.auth, expected_status=status_OK) self.get(list_url, auth=self.auth, expected_status=status_OK) project = {'description': 'Updated'} self.patch(entity_url, auth=self.auth, body={'project': project}, expected_status=status_OK) self.delete(entity_url, auth=self.auth, expected_status=status_no_data) proj_ref = unit.new_project_ref(domain_id=domain_id) self.post('/projects', auth=self.auth, body={'project': proj_ref}, expected_status=status_created) def _test_domain_management(self, expected=None): status_OK, status_created, status_no_data = self._stati(expected) entity_url = '/domains/%s' % self.domainB['id'] list_url = '/domains' self.get(entity_url, auth=self.auth, expected_status=status_OK) self.get(list_url, auth=self.auth, expected_status=status_OK) domain = {'description': 'Updated', 'enabled': False} self.patch(entity_url, auth=self.auth, body={'domain': domain}, expected_status=status_OK) self.delete(entity_url, auth=self.auth, expected_status=status_no_data) domain_ref = unit.new_domain_ref() self.post('/domains', auth=self.auth, body={'domain': domain_ref}, expected_status=status_created) def _test_grants(self, target, entity_id, role_domain_id=None, list_status_OK=False, expected=None): status_OK, status_created, status_no_data = self._stati(expected) a_role = unit.new_role_ref(domain_id=role_domain_id) self.role_api.create_role(a_role['id'], a_role) 
collection_url = ( '/%(target)s/%(target_id)s/users/%(user_id)s/roles' % { 'target': target, 'target_id': entity_id, 'user_id': self.just_a_user['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': a_role['id']} self.put(member_url, auth=self.auth, expected_status=status_no_data) self.head(member_url, auth=self.auth, expected_status=status_no_data) if list_status_OK: self.get(collection_url, auth=self.auth) else: self.get(collection_url, auth=self.auth, expected_status=status_OK) self.delete(member_url, auth=self.auth, expected_status=status_no_data) def _role_management_cases(self, read_status_OK=False, expected=None): # Set the different status values for different types of call depending # on whether we expect the calls to fail or not. status_OK, status_created, status_no_data = self._stati(expected) entity_url = '/roles/%s' % self.role['id'] list_url = '/roles' if read_status_OK: self.get(entity_url, auth=self.auth) self.get(list_url, auth=self.auth) else: self.get(entity_url, auth=self.auth, expected_status=status_OK) self.get(list_url, auth=self.auth, expected_status=status_OK) role = {'name': 'Updated'} self.patch(entity_url, auth=self.auth, body={'role': role}, expected_status=status_OK) self.delete(entity_url, auth=self.auth, expected_status=status_no_data) role_ref = unit.new_role_ref() self.post('/roles', auth=self.auth, body={'role': role_ref}, expected_status=status_created) def _domain_role_management_cases(self, domain_id, read_status_OK=False, expected=None): # Set the different status values for different types of call depending # on whether we expect the calls to fail or not. 
status_OK, status_created, status_no_data = self._stati(expected) entity_url = '/roles/%s' % self.roleA['id'] list_url = '/roles?domain_id=%s' % domain_id if read_status_OK: self.get(entity_url, auth=self.auth) self.get(list_url, auth=self.auth) else: self.get(entity_url, auth=self.auth, expected_status=status_OK) self.get(list_url, auth=self.auth, expected_status=status_OK) role = {'name': 'Updated'} self.patch(entity_url, auth=self.auth, body={'role': role}, expected_status=status_OK) self.delete(entity_url, auth=self.auth, expected_status=status_no_data) role_ref = unit.new_role_ref(domain_id=domain_id) self.post('/roles', auth=self.auth, body={'role': role_ref}, expected_status=status_created) def test_user_management(self): # First, authenticate with a user that does not have the domain # admin role - shouldn't be able to do much. self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], domain_id=self.domainA['id']) self._test_user_management( self.domainA['id'], expected=exception.ForbiddenAction.code) # Now, authenticate with a user that does have the domain admin role self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) self._test_user_management(self.domainA['id']) def test_user_management_normalized_keys(self): """Illustrate the inconsistent handling of hyphens in keys. To quote Morgan in bug 1526244: the reason this is converted from "domain-id" to "domain_id" is because of how we process/normalize data. The way we have to handle specific data types for known columns requires avoiding "-" in the actual python code since "-" is not valid for attributes in python w/o significant use of "getattr" etc. In short, historically we handle some things in conversions. 
The use of "extras" has long been a poor design choice that leads to odd/strange inconsistent behaviors because of other choices made in handling data from within the body. (In many cases we convert from "-" to "_" throughout openstack) Source: https://bugs.launchpad.net/keystone/+bug/1526244/comments/9 """ # Authenticate with a user that has the domain admin role self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) # Show that we can read a normal user without any surprises. r = self.get( '/users/%s' % self.just_a_user['id'], auth=self.auth, expected_status=http_client.OK) self.assertValidUserResponse(r) # We don't normalize query string keys, so both of these result in a # 403, because we didn't specify a domain_id query string in either # case, and we explicitly require one (it doesn't matter what # 'domain-id' value you use). self.get( '/users?domain-id=%s' % self.domainA['id'], auth=self.auth, expected_status=exception.ForbiddenAction.code) self.get( '/users?domain-id=%s' % self.domainB['id'], auth=self.auth, expected_status=exception.ForbiddenAction.code) # If we try updating the user's 'domain_id' by specifying a # 'domain-id', then it'll be stored into extras rather than normalized, # and the user's actual 'domain_id' is not affected. r = self.patch( '/users/%s' % self.just_a_user['id'], auth=self.auth, body={'user': {'domain-id': self.domainB['id']}}, expected_status=http_client.OK) self.assertEqual(self.domainB['id'], r.json['user']['domain-id']) self.assertEqual(self.domainA['id'], r.json['user']['domain_id']) self.assertNotEqual(self.domainB['id'], self.just_a_user['domain_id']) self.assertValidUserResponse(r, self.just_a_user) # Finally, show that we can create a new user without any surprises. # But if we specify a 'domain-id' instead of a 'domain_id', we get a # Forbidden response because we fail a policy check before # normalization occurs. 
user_ref = unit.new_user_ref(domain_id=self.domainA['id']) r = self.post( '/users', auth=self.auth, body={'user': user_ref}, expected_status=http_client.CREATED) self.assertValidUserResponse(r, ref=user_ref) user_ref['domain-id'] = user_ref.pop('domain_id') self.post( '/users', auth=self.auth, body={'user': user_ref}, expected_status=exception.ForbiddenAction.code) def test_user_management_by_cloud_admin(self): # Test users management with a cloud admin. This user should # be able to manage users in any domain. self.auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password'], project_id=self.admin_project['id']) self._test_user_management(self.domainA['id']) def test_project_management(self): # First, authenticate with a user that does not have the project # admin role - shouldn't be able to do much. self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], domain_id=self.domainA['id']) self._test_project_management( self.domainA['id'], expected=exception.ForbiddenAction.code) # ...but should still be able to list projects of which they are # a member url = '/users/%s/projects' % self.just_a_user['id'] self.get(url, auth=self.auth) # Now, authenticate with a user that does have the domain admin role self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) self._test_project_management(self.domainA['id']) def test_project_management_by_cloud_admin(self): self.auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password'], project_id=self.admin_project['id']) # Check whether cloud admin can operate a domain # other than its own domain or not self._test_project_management(self.domainA['id']) def test_domain_grants(self): self.auth = self.build_authentication_request( 
user_id=self.just_a_user['id'], password=self.just_a_user['password'], domain_id=self.domainA['id']) self._test_grants('domains', self.domainA['id'], expected=exception.ForbiddenAction.code) # Now, authenticate with a user that does have the domain admin role self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) self._test_grants('domains', self.domainA['id']) # Check that with such a token we cannot modify grants on a # different domain self._test_grants('domains', self.domainB['id'], expected=exception.ForbiddenAction.code) def test_domain_grants_by_cloud_admin(self): # Test domain grants with a cloud admin. This user should be # able to manage roles on any domain. self.auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password'], project_id=self.admin_project['id']) self._test_grants('domains', self.domainA['id']) def test_domain_grants_by_cloud_admin_for_domain_specific_role(self): # Test domain grants with a cloud admin. This user should be # able to manage domain roles on any domain. 
self.auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password'], project_id=self.admin_project['id']) self._test_grants('domains', self.domainA['id'], role_domain_id=self.domainB['id']) def test_domain_grants_by_non_admin_for_domain_specific_role(self): # A non-admin shouldn't be able to do anything self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], domain_id=self.domainA['id']) self._test_grants('domains', self.domainA['id'], role_domain_id=self.domainA['id'], expected=exception.ForbiddenAction.code) self._test_grants('domains', self.domainA['id'], role_domain_id=self.domainB['id'], expected=exception.ForbiddenAction.code) def test_domain_grants_by_domain_admin_for_domain_specific_role(self): # Authenticate with a user that does have the domain admin role, # should not be able to assign a domain_specific role from another # domain self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) self._test_grants('domains', self.domainA['id'], role_domain_id=self.domainB['id'], # List status will always be OK, since we are not # granting/checking/deleting assignments list_status_OK=True, expected=exception.ForbiddenAction.code) # They should be able to assign a domain specific role from the same # domain self._test_grants('domains', self.domainA['id'], role_domain_id=self.domainA['id']) def test_project_grants(self): self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], project_id=self.project['id']) self._test_grants('projects', self.project['id'], expected=exception.ForbiddenAction.code) # Now, authenticate with a user that does have the project # admin role self.auth = self.build_authentication_request( user_id=self.project_admin_user['id'], 
password=self.project_admin_user['password'], project_id=self.project['id']) self._test_grants('projects', self.project['id']) def test_project_grants_by_domain_admin(self): # Test project grants with a domain admin. This user should be # able to manage roles on any project in its own domain. self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) self._test_grants('projects', self.project['id']) def test_project_grants_by_non_admin_for_domain_specific_role(self): # A non-admin shouldn't be able to do anything self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], project_id=self.project['id']) self._test_grants('projects', self.project['id'], role_domain_id=self.domainA['id'], expected=exception.ForbiddenAction.code) self._test_grants('projects', self.project['id'], role_domain_id=self.domainB['id'], expected=exception.ForbiddenAction.code) def test_project_grants_by_project_admin_for_domain_specific_role(self): # Authenticate with a user that does have the project admin role, # should not be able to assign a domain_specific role from another # domain self.auth = self.build_authentication_request( user_id=self.project_admin_user['id'], password=self.project_admin_user['password'], project_id=self.project['id']) self._test_grants('projects', self.project['id'], role_domain_id=self.domainB['id'], # List status will always be OK, since we are not # granting/checking/deleting assignments list_status_OK=True, expected=exception.ForbiddenAction.code) # They should be able to assign a domain specific role from the same # domain self._test_grants('projects', self.project['id'], role_domain_id=self.domainA['id']) def test_project_grants_by_domain_admin_for_domain_specific_role(self): # Authenticate with a user that does have the domain admin role, # should not be able to assign a domain_specific role from 
another # domain self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) self._test_grants('projects', self.project['id'], role_domain_id=self.domainB['id'], # List status will always be OK, since we are not # granting/checking/deleting assignments list_status_OK=True, expected=exception.ForbiddenAction.code) # They should be able to assign a domain specific role from the same # domain self._test_grants('projects', self.project['id'], role_domain_id=self.domainA['id']) def test_cloud_admin_list_assignments_of_domain(self): self.auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password'], project_id=self.admin_project['id']) collection_url = self.build_role_assignment_query_url( domain_id=self.domainA['id']) r = self.get(collection_url, auth=self.auth) self.assertValidRoleAssignmentListResponse( r, expected_length=2, resource_url=collection_url) domainA_admin_entity = self.build_role_assignment_entity( domain_id=self.domainA['id'], user_id=self.domain_admin_user['id'], role_id=self.admin_role['id'], inherited_to_projects=False) domainA_user_entity = self.build_role_assignment_entity( domain_id=self.domainA['id'], user_id=self.just_a_user['id'], role_id=self.role['id'], inherited_to_projects=False) self.assertRoleAssignmentInListResponse(r, domainA_admin_entity) self.assertRoleAssignmentInListResponse(r, domainA_user_entity) def test_domain_admin_list_assignments_of_domain(self): self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) collection_url = self.build_role_assignment_query_url( domain_id=self.domainA['id']) r = self.get(collection_url, auth=self.auth) self.assertValidRoleAssignmentListResponse( r, expected_length=2, resource_url=collection_url) domainA_admin_entity = 
self.build_role_assignment_entity( domain_id=self.domainA['id'], user_id=self.domain_admin_user['id'], role_id=self.admin_role['id'], inherited_to_projects=False) domainA_user_entity = self.build_role_assignment_entity( domain_id=self.domainA['id'], user_id=self.just_a_user['id'], role_id=self.role['id'], inherited_to_projects=False) self.assertRoleAssignmentInListResponse(r, domainA_admin_entity) self.assertRoleAssignmentInListResponse(r, domainA_user_entity) def test_domain_admin_list_assignments_of_another_domain_failed(self): self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) collection_url = self.build_role_assignment_query_url( domain_id=self.domainB['id']) self.get(collection_url, auth=self.auth, expected_status=http_client.FORBIDDEN) def test_domain_user_list_assignments_of_domain_failed(self): self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], domain_id=self.domainA['id']) collection_url = self.build_role_assignment_query_url( domain_id=self.domainA['id']) self.get(collection_url, auth=self.auth, expected_status=http_client.FORBIDDEN) def test_cloud_admin_list_assignments_of_project(self): self.auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password'], project_id=self.admin_project['id']) collection_url = self.build_role_assignment_query_url( project_id=self.project['id']) r = self.get(collection_url, auth=self.auth) self.assertValidRoleAssignmentListResponse( r, expected_length=2, resource_url=collection_url) project_admin_entity = self.build_role_assignment_entity( project_id=self.project['id'], user_id=self.project_admin_user['id'], role_id=self.admin_role['id'], inherited_to_projects=False) project_user_entity = self.build_role_assignment_entity( project_id=self.project['id'], user_id=self.just_a_user['id'], 
role_id=self.role['id'], inherited_to_projects=False) self.assertRoleAssignmentInListResponse(r, project_admin_entity) self.assertRoleAssignmentInListResponse(r, project_user_entity) def test_admin_project_list_assignments_of_project(self): self.auth = self.build_authentication_request( user_id=self.project_admin_user['id'], password=self.project_admin_user['password'], project_id=self.project['id']) collection_url = self.build_role_assignment_query_url( project_id=self.project['id']) r = self.get(collection_url, auth=self.auth) self.assertValidRoleAssignmentListResponse( r, expected_length=2, resource_url=collection_url) project_admin_entity = self.build_role_assignment_entity( project_id=self.project['id'], user_id=self.project_admin_user['id'], role_id=self.admin_role['id'], inherited_to_projects=False) project_user_entity = self.build_role_assignment_entity( project_id=self.project['id'], user_id=self.just_a_user['id'], role_id=self.role['id'], inherited_to_projects=False) self.assertRoleAssignmentInListResponse(r, project_admin_entity) self.assertRoleAssignmentInListResponse(r, project_user_entity) @utils.wip('waiting on bug #1437407') def test_domain_admin_list_assignments_of_project(self): self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) collection_url = self.build_role_assignment_query_url( project_id=self.project['id']) r = self.get(collection_url, auth=self.auth) self.assertValidRoleAssignmentListResponse( r, expected_length=2, resource_url=collection_url) project_admin_entity = self.build_role_assignment_entity( project_id=self.project['id'], user_id=self.project_admin_user['id'], role_id=self.admin_role['id'], inherited_to_projects=False) project_user_entity = self.build_role_assignment_entity( project_id=self.project['id'], user_id=self.just_a_user['id'], role_id=self.role['id'], inherited_to_projects=False) 
self.assertRoleAssignmentInListResponse(r, project_admin_entity) self.assertRoleAssignmentInListResponse(r, project_user_entity) def test_domain_admin_list_assignment_tree(self): # Add a child project to the standard test data sub_project = unit.new_project_ref(domain_id=self.domainA['id'], parent_id=self.project['id']) self.resource_api.create_project(sub_project['id'], sub_project) self.assignment_api.create_grant(self.role['id'], user_id=self.just_a_user['id'], project_id=sub_project['id']) collection_url = self.build_role_assignment_query_url( project_id=self.project['id']) collection_url += '&include_subtree=True' # The domain admin should be able to list the assignment tree auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) r = self.get(collection_url, auth=auth) self.assertValidRoleAssignmentListResponse( r, expected_length=3, resource_url=collection_url) # A project admin should not be able to auth = self.build_authentication_request( user_id=self.project_admin_user['id'], password=self.project_admin_user['password'], project_id=self.project['id']) r = self.get(collection_url, auth=auth, expected_status=http_client.FORBIDDEN) # A neither should a domain admin from a different domain domainB_admin_user = unit.create_user( self.identity_api, domain_id=self.domainB['id']) self.assignment_api.create_grant(self.admin_role['id'], user_id=domainB_admin_user['id'], domain_id=self.domainB['id']) auth = self.build_authentication_request( user_id=domainB_admin_user['id'], password=domainB_admin_user['password'], domain_id=self.domainB['id']) r = self.get(collection_url, auth=auth, expected_status=http_client.FORBIDDEN) def test_domain_user_list_assignments_of_project_failed(self): self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], domain_id=self.domainA['id']) collection_url = 
self.build_role_assignment_query_url( project_id=self.project['id']) self.get(collection_url, auth=self.auth, expected_status=http_client.FORBIDDEN) def test_cloud_admin(self): self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) self._test_domain_management( expected=exception.ForbiddenAction.code) self.auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password'], project_id=self.admin_project['id']) self._test_domain_management() def test_admin_project(self): self.auth = self.build_authentication_request( user_id=self.project_admin_user['id'], password=self.project_admin_user['password'], project_id=self.project['id']) self._test_domain_management( expected=exception.ForbiddenAction.code) self.auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password'], project_id=self.admin_project['id']) self._test_domain_management() def test_domain_admin_get_domain(self): self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) entity_url = '/domains/%s' % self.domainA['id'] self.get(entity_url, auth=self.auth) def test_list_user_credentials(self): credential_user = unit.new_credential_ref(self.just_a_user['id']) self.credential_api.create_credential(credential_user['id'], credential_user) credential_admin = unit.new_credential_ref(self.cloud_admin_user['id']) self.credential_api.create_credential(credential_admin['id'], credential_admin) self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) url = '/credentials?user_id=%s' % self.just_a_user['id'] self.get(url, auth=self.auth) url = '/credentials?user_id=%s' % self.cloud_admin_user['id'] self.get(url, auth=self.auth, 
expected_status=exception.ForbiddenAction.code) url = '/credentials' self.get(url, auth=self.auth, expected_status=exception.ForbiddenAction.code) def test_get_and_delete_ec2_credentials(self): """Tests getting and deleting ec2 credentials through the ec2 API.""" another_user = unit.create_user(self.identity_api, domain_id=self.domainA['id']) # create a credential for just_a_user just_user_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], project_id=self.project['id']) url = '/users/%s/credentials/OS-EC2' % self.just_a_user['id'] r = self.post(url, body={'tenant_id': self.project['id']}, auth=just_user_auth) # another normal user can't get the credential another_user_auth = self.build_authentication_request( user_id=another_user['id'], password=another_user['password']) another_user_url = '/users/%s/credentials/OS-EC2/%s' % ( another_user['id'], r.result['credential']['access']) self.get(another_user_url, auth=another_user_auth, expected_status=exception.ForbiddenAction.code) # the owner can get the credential just_user_url = '/users/%s/credentials/OS-EC2/%s' % ( self.just_a_user['id'], r.result['credential']['access']) self.get(just_user_url, auth=just_user_auth) # another normal user can't delete the credential self.delete(another_user_url, auth=another_user_auth, expected_status=exception.ForbiddenAction.code) # the owner can get the credential self.delete(just_user_url, auth=just_user_auth) def test_user_validate_same_token(self): # Given a non-admin user token, the token can be used to validate # itself. # This is GET /v3/auth/tokens, with X-Auth-Token == X-Subject-Token auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token = self.get_requested_token(auth) self.get('/auth/tokens', token=token, headers={'X-Subject-Token': token}) def test_user_validate_user_token(self): # A user can validate one of their own tokens. 
# This is GET /v3/auth/tokens auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token1 = self.get_requested_token(auth) token2 = self.get_requested_token(auth) self.get('/auth/tokens', token=token1, headers={'X-Subject-Token': token2}) def test_user_validate_other_user_token_rejected(self): # A user cannot validate another user's token. # This is GET /v3/auth/tokens user1_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user1_token = self.get_requested_token(user1_auth) user2_auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password']) user2_token = self.get_requested_token(user2_auth) self.get('/auth/tokens', token=user1_token, headers={'X-Subject-Token': user2_token}, expected_status=http_client.FORBIDDEN) def test_admin_validate_user_token(self): # An admin can validate a user's token. # This is GET /v3/auth/tokens admin_auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password'], project_id=self.admin_project['id']) admin_token = self.get_requested_token(admin_auth) user_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user_token = self.get_requested_token(user_auth) self.get('/auth/tokens', token=admin_token, headers={'X-Subject-Token': user_token}) def test_admin_project_validate_user_token(self): # An admin can validate a user's token. 
# This is GET /v3/auth/tokens admin_auth = self.build_authentication_request( user_id=self.project_admin_user['id'], password=self.project_admin_user['password'], project_id=self.project['id']) admin_token = self.get_requested_token(admin_auth) user_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user_token = self.get_requested_token(user_auth) self.get('/auth/tokens', token=admin_token, headers={'X-Subject-Token': user_token}) def test_user_check_same_token(self): # Given a non-admin user token, the token can be used to check # itself. # This is HEAD /v3/auth/tokens, with X-Auth-Token == X-Subject-Token auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token = self.get_requested_token(auth) self.head('/auth/tokens', token=token, headers={'X-Subject-Token': token}, expected_status=http_client.OK) def test_user_check_user_token(self): # A user can check one of their own tokens. # This is HEAD /v3/auth/tokens auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token1 = self.get_requested_token(auth) token2 = self.get_requested_token(auth) self.head('/auth/tokens', token=token1, headers={'X-Subject-Token': token2}, expected_status=http_client.OK) def test_user_check_other_user_token_rejected(self): # A user cannot check another user's token. 
# This is HEAD /v3/auth/tokens user1_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user1_token = self.get_requested_token(user1_auth) user2_auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password']) user2_token = self.get_requested_token(user2_auth) self.head('/auth/tokens', token=user1_token, headers={'X-Subject-Token': user2_token}, expected_status=http_client.FORBIDDEN) def test_admin_check_user_token(self): # An admin can check a user's token. # This is HEAD /v3/auth/tokens admin_auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) admin_token = self.get_requested_token(admin_auth) user_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user_token = self.get_requested_token(user_auth) self.head('/auth/tokens', token=admin_token, headers={'X-Subject-Token': user_token}, expected_status=http_client.OK) def test_user_revoke_same_token(self): # Given a non-admin user token, the token can be used to revoke # itself. # This is DELETE /v3/auth/tokens, with X-Auth-Token == X-Subject-Token auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token = self.get_requested_token(auth) self.delete('/auth/tokens', token=token, headers={'X-Subject-Token': token}) def test_user_revoke_user_token(self): # A user can revoke one of their own tokens. 
# This is DELETE /v3/auth/tokens auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) token1 = self.get_requested_token(auth) token2 = self.get_requested_token(auth) self.delete('/auth/tokens', token=token1, headers={'X-Subject-Token': token2}) def test_user_revoke_other_user_token_rejected(self): # A user cannot revoke another user's token. # This is DELETE /v3/auth/tokens user1_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user1_token = self.get_requested_token(user1_auth) user2_auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password']) user2_token = self.get_requested_token(user2_auth) self.delete('/auth/tokens', token=user1_token, headers={'X-Subject-Token': user2_token}, expected_status=http_client.FORBIDDEN) def test_admin_revoke_user_token(self): # An admin can revoke a user's token. # This is DELETE /v3/auth/tokens admin_auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) admin_token = self.get_requested_token(admin_auth) user_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password']) user_token = self.get_requested_token(user_auth) self.delete('/auth/tokens', token=admin_token, headers={'X-Subject-Token': user_token}) def test_user_with_a_role_get_project(self): user_auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], project_id=self.project['id']) # Test user can get project for one they have a role in self.get('/projects/%s' % self.project['id'], auth=user_auth) # Test user can not get project for one they don't have a role in, # even if they have a role on another project project2 = unit.new_project_ref(domain_id=self.domainA['id']) 
self.resource_api.create_project(project2['id'], project2) self.get('/projects/%s' % project2['id'], auth=user_auth, expected_status=exception.ForbiddenAction.code) def test_project_admin_get_project(self): admin_auth = self.build_authentication_request( user_id=self.project_admin_user['id'], password=self.project_admin_user['password'], project_id=self.project['id']) resp = self.get('/projects/%s' % self.project['id'], auth=admin_auth) self.assertEqual(self.project['id'], jsonutils.loads(resp.body)['project']['id']) def test_role_management_no_admin_no_rights(self): # A non-admin domain user shouldn't be able to manipulate roles self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], domain_id=self.domainA['id']) self._role_management_cases(expected=exception.ForbiddenAction.code) # ...and nor should non-admin project user self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], project_id=self.project['id']) self._role_management_cases(expected=exception.ForbiddenAction.code) def test_role_management_with_project_admin(self): # A project admin user should be able to get and list, but not be able # to create/update/delete global roles self.auth = self.build_authentication_request( user_id=self.project_admin_user['id'], password=self.project_admin_user['password'], project_id=self.project['id']) self._role_management_cases(read_status_OK=True, expected=exception.ForbiddenAction.code) def test_role_management_with_domain_admin(self): # A domain admin user should be able to get and list, but not be able # to create/update/delete global roles self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) self._role_management_cases(read_status_OK=True, expected=exception.ForbiddenAction.code) def test_role_management_with_cloud_admin(self): # A cloud 
admin user should have rights to manipulate global roles self.auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password'], project_id=self.admin_project['id']) self._role_management_cases() def test_domain_role_management_no_admin_no_rights(self): # A non-admin domain user shouldn't be able to manipulate domain roles self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], domain_id=self.domainA['id']) self._domain_role_management_cases( self.domainA['id'], expected=exception.ForbiddenAction.code) # ...and nor should non-admin project user self.auth = self.build_authentication_request( user_id=self.just_a_user['id'], password=self.just_a_user['password'], project_id=self.project['id']) self._domain_role_management_cases( self.domainA['id'], expected=exception.ForbiddenAction.code) def test_domain_role_management_with_cloud_admin(self): # A cloud admin user should have rights to manipulate domain roles self.auth = self.build_authentication_request( user_id=self.cloud_admin_user['id'], password=self.cloud_admin_user['password'], project_id=self.admin_project['id']) self._domain_role_management_cases(self.domainA['id']) def test_domain_role_management_with_domain_admin(self): # A domain admin user should only be able to manipulate the domain # specific roles in their own domain self.auth = self.build_authentication_request( user_id=self.domainB_admin_user['id'], password=self.domainB_admin_user['password'], domain_id=self.domainB['id']) # Try to access the domain specific roles in another domain self._domain_role_management_cases( self.domainA['id'], expected=exception.ForbiddenAction.code) # ...but they should be able to work with those in their own domain self.auth = self.build_authentication_request( user_id=self.domain_admin_user['id'], password=self.domain_admin_user['password'], domain_id=self.domainA['id']) 
self._domain_role_management_cases(self.domainA['id']) def test_domain_role_management_with_project_admin(self): # A project admin user should have not access to domain specific roles # in another domain. They should be able to get and list domain # specific roles from their own domain, but not be able to create, # update or delete them, self.auth = self.build_authentication_request( user_id=self.project_adminB_user['id'], password=self.project_adminB_user['password'], project_id=self.projectB['id']) # Try access the domain specific roless in another domain self._domain_role_management_cases( self.domainA['id'], expected=exception.ForbiddenAction.code) # ...but they should be ablet to work with those in their own domain self.auth = self.build_authentication_request( user_id=self.project_admin_user['id'], password=self.project_admin_user['password'], project_id=self.project['id']) self._domain_role_management_cases( self.domainA['id'], read_status_OK=True, expected=exception.ForbiddenAction.code) keystone-9.0.0/keystone/tests/unit/test_ldap_tls_livetest.py0000664000567000056710000001034612701407102025634 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ldap.modlist from oslo_config import cfg from keystone import exception from keystone import identity from keystone.tests import unit from keystone.tests.unit import test_ldap_livetest CONF = cfg.CONF def create_object(dn, attrs): conn = ldap.initialize(CONF.ldap.url) conn.simple_bind_s(CONF.ldap.user, CONF.ldap.password) ldif = ldap.modlist.addModlist(attrs) conn.add_s(dn, ldif) conn.unbind_s() class LiveTLSLDAPIdentity(test_ldap_livetest.LiveLDAPIdentity): def _ldap_skip_live(self): self.skip_if_env_not_set('ENABLE_TLS_LDAP_LIVE_TEST') def config_files(self): config_files = super(LiveTLSLDAPIdentity, self).config_files() config_files.append(unit.dirs.tests_conf('backend_tls_liveldap.conf')) return config_files def test_tls_certfile_demand_option(self): self.config_fixture.config(group='ldap', use_tls=True, tls_cacertdir=None, tls_req_cert='demand') self.identity_api = identity.backends.ldap.Identity() # TODO(shaleh): use new_user_ref() user = {'name': 'fake1', 'password': 'fakepass1', 'tenants': ['bar']} user = self.identity_api.create_user('user') user_ref = self.identity_api.get_user(user['id']) self.assertEqual(user['id'], user_ref['id']) user['password'] = 'fakepass2' self.identity_api.update_user(user['id'], user) self.identity_api.delete_user(user['id']) self.assertRaises(exception.UserNotFound, self.identity_api.get_user, user['id']) def test_tls_certdir_demand_option(self): self.config_fixture.config(group='ldap', use_tls=True, tls_cacertdir=None, tls_req_cert='demand') self.identity_api = identity.backends.ldap.Identity() # TODO(shaleh): use new_user_ref() user = {'id': 'fake1', 'name': 'fake1', 'password': 'fakepass1', 'tenants': ['bar']} self.identity_api.create_user('fake1', user) user_ref = self.identity_api.get_user('fake1') self.assertEqual('fake1', user_ref['id']) user['password'] = 'fakepass2' self.identity_api.update_user('fake1', user) self.identity_api.delete_user('fake1') self.assertRaises(exception.UserNotFound, 
self.identity_api.get_user, 'fake1') def test_tls_bad_certfile(self): self.config_fixture.config( group='ldap', use_tls=True, tls_req_cert='demand', tls_cacertfile='/etc/keystone/ssl/certs/mythicalcert.pem', tls_cacertdir=None) self.identity_api = identity.backends.ldap.Identity() # TODO(shaleh): use new_user_ref() user = {'name': 'fake1', 'password': 'fakepass1', 'tenants': ['bar']} self.assertRaises(IOError, self.identity_api.create_user, user) def test_tls_bad_certdir(self): self.config_fixture.config( group='ldap', use_tls=True, tls_cacertfile=None, tls_req_cert='demand', tls_cacertdir='/etc/keystone/ssl/mythicalcertdir') self.identity_api = identity.backends.ldap.Identity() # TODO(shaleh): use new_user_ref() user = {'name': 'fake1', 'password': 'fakepass1', 'tenants': ['bar']} self.assertRaises(IOError, self.identity_api.create_user, user) keystone-9.0.0/keystone/tests/unit/test_backend_id_mapping_sql.py0000664000567000056710000002154612701407102026554 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from testtools import matchers from keystone.common import sql from keystone.identity.mapping_backends import mapping from keystone.tests import unit from keystone.tests.unit import identity_mapping as mapping_sql from keystone.tests.unit import test_backend_sql class SqlIDMappingTable(test_backend_sql.SqlModels): """Set of tests for checking SQL Identity ID Mapping.""" def test_id_mapping(self): cols = (('public_id', sql.String, 64), ('domain_id', sql.String, 64), ('local_id', sql.String, 64), ('entity_type', sql.Enum, None)) self.assertExpectedSchema('id_mapping', cols) class SqlIDMapping(test_backend_sql.SqlTests): def setUp(self): super(SqlIDMapping, self).setUp() self.load_sample_data() def load_sample_data(self): self.addCleanup(self.clean_sample_data) domainA = unit.new_domain_ref() self.domainA = self.resource_api.create_domain(domainA['id'], domainA) domainB = unit.new_domain_ref() self.domainB = self.resource_api.create_domain(domainB['id'], domainB) def clean_sample_data(self): if hasattr(self, 'domainA'): self.domainA['enabled'] = False self.resource_api.update_domain(self.domainA['id'], self.domainA) self.resource_api.delete_domain(self.domainA['id']) if hasattr(self, 'domainB'): self.domainB['enabled'] = False self.resource_api.update_domain(self.domainB['id'], self.domainB) self.resource_api.delete_domain(self.domainB['id']) def test_invalid_public_key(self): self.assertIsNone(self.id_mapping_api.get_id_mapping(uuid.uuid4().hex)) def test_id_mapping_crud(self): initial_mappings = len(mapping_sql.list_id_mappings()) local_id1 = uuid.uuid4().hex local_id2 = uuid.uuid4().hex local_entity1 = {'domain_id': self.domainA['id'], 'local_id': local_id1, 'entity_type': mapping.EntityType.USER} local_entity2 = {'domain_id': self.domainB['id'], 'local_id': local_id2, 'entity_type': mapping.EntityType.GROUP} # Check no mappings for the new local entities self.assertIsNone(self.id_mapping_api.get_public_id(local_entity1)) 
self.assertIsNone(self.id_mapping_api.get_public_id(local_entity2)) # Create the new mappings and then read them back public_id1 = self.id_mapping_api.create_id_mapping(local_entity1) public_id2 = self.id_mapping_api.create_id_mapping(local_entity2) self.assertThat(mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 2)) self.assertEqual( public_id1, self.id_mapping_api.get_public_id(local_entity1)) self.assertEqual( public_id2, self.id_mapping_api.get_public_id(local_entity2)) local_id_ref = self.id_mapping_api.get_id_mapping(public_id1) self.assertEqual(self.domainA['id'], local_id_ref['domain_id']) self.assertEqual(local_id1, local_id_ref['local_id']) self.assertEqual(mapping.EntityType.USER, local_id_ref['entity_type']) # Check we have really created a new external ID self.assertNotEqual(local_id1, public_id1) local_id_ref = self.id_mapping_api.get_id_mapping(public_id2) self.assertEqual(self.domainB['id'], local_id_ref['domain_id']) self.assertEqual(local_id2, local_id_ref['local_id']) self.assertEqual(mapping.EntityType.GROUP, local_id_ref['entity_type']) # Check we have really created a new external ID self.assertNotEqual(local_id2, public_id2) # Create another mappings, this time specifying a public ID to use new_public_id = uuid.uuid4().hex public_id3 = self.id_mapping_api.create_id_mapping( {'domain_id': self.domainB['id'], 'local_id': local_id2, 'entity_type': mapping.EntityType.USER}, public_id=new_public_id) self.assertEqual(new_public_id, public_id3) self.assertThat(mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 3)) # Delete the mappings we created, and make sure the mapping count # goes back to where it was self.id_mapping_api.delete_id_mapping(public_id1) self.id_mapping_api.delete_id_mapping(public_id2) self.id_mapping_api.delete_id_mapping(public_id3) self.assertThat(mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings)) def test_id_mapping_handles_unicode(self): initial_mappings = 
len(mapping_sql.list_id_mappings()) local_id = u'fäké1' local_entity = {'domain_id': self.domainA['id'], 'local_id': local_id, 'entity_type': mapping.EntityType.USER} # Check no mappings for the new local entity self.assertIsNone(self.id_mapping_api.get_public_id(local_entity)) # Create the new mapping and then read it back public_id = self.id_mapping_api.create_id_mapping(local_entity) self.assertThat(mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 1)) self.assertEqual( public_id, self.id_mapping_api.get_public_id(local_entity)) def test_delete_public_id_is_silent(self): # Test that deleting an invalid public key is silent self.id_mapping_api.delete_id_mapping(uuid.uuid4().hex) def test_purge_mappings(self): initial_mappings = len(mapping_sql.list_id_mappings()) local_id1 = uuid.uuid4().hex local_id2 = uuid.uuid4().hex local_id3 = uuid.uuid4().hex local_id4 = uuid.uuid4().hex local_id5 = uuid.uuid4().hex # Create five mappings,two in domainA, three in domainB self.id_mapping_api.create_id_mapping( {'domain_id': self.domainA['id'], 'local_id': local_id1, 'entity_type': mapping.EntityType.USER}) self.id_mapping_api.create_id_mapping( {'domain_id': self.domainA['id'], 'local_id': local_id2, 'entity_type': mapping.EntityType.USER}) public_id3 = self.id_mapping_api.create_id_mapping( {'domain_id': self.domainB['id'], 'local_id': local_id3, 'entity_type': mapping.EntityType.GROUP}) public_id4 = self.id_mapping_api.create_id_mapping( {'domain_id': self.domainB['id'], 'local_id': local_id4, 'entity_type': mapping.EntityType.USER}) public_id5 = self.id_mapping_api.create_id_mapping( {'domain_id': self.domainB['id'], 'local_id': local_id5, 'entity_type': mapping.EntityType.USER}) self.assertThat(mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 5)) # Purge mappings for domainA, should be left with those in B self.id_mapping_api.purge_mappings( {'domain_id': self.domainA['id']}) self.assertThat(mapping_sql.list_id_mappings(), 
matchers.HasLength(initial_mappings + 3)) self.id_mapping_api.get_id_mapping(public_id3) self.id_mapping_api.get_id_mapping(public_id4) self.id_mapping_api.get_id_mapping(public_id5) # Purge mappings for type Group, should purge one more self.id_mapping_api.purge_mappings( {'entity_type': mapping.EntityType.GROUP}) self.assertThat(mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 2)) self.id_mapping_api.get_id_mapping(public_id4) self.id_mapping_api.get_id_mapping(public_id5) # Purge mapping for a specific local identifier self.id_mapping_api.purge_mappings( {'domain_id': self.domainB['id'], 'local_id': local_id4, 'entity_type': mapping.EntityType.USER}) self.assertThat(mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 1)) self.id_mapping_api.get_id_mapping(public_id5) # Purge mappings the remaining mappings self.id_mapping_api.purge_mappings({}) self.assertThat(mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings)) keystone-9.0.0/keystone/tests/unit/mapping_fixtures.py0000664000567000056710000010324512701407102024441 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Fixtures for Federation Mapping.""" from six.moves import range, zip EMPLOYEE_GROUP_ID = "0cd5e9" CONTRACTOR_GROUP_ID = "85a868" TESTER_GROUP_ID = "123" TESTER_GROUP_NAME = "tester" DEVELOPER_GROUP_ID = "xyz" DEVELOPER_GROUP_NAME = "Developer" CONTRACTOR_GROUP_NAME = "Contractor" DEVELOPER_GROUP_DOMAIN_NAME = "outsourcing" DEVELOPER_GROUP_DOMAIN_ID = "5abc43" FEDERATED_DOMAIN = "Federated" LOCAL_DOMAIN = "Local" # Mapping summary: # LastName Smith & Not Contractor or SubContractor -> group 0cd5e9 # FirstName Jill & Contractor or SubContractor -> to group 85a868 MAPPING_SMALL = { "rules": [ { "local": [ { "group": { "id": EMPLOYEE_GROUP_ID } }, { "user": { "name": "{0}" } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "not_any_of": [ "Contractor", "SubContractor" ] }, { "type": "LastName", "any_one_of": [ "Bo" ] } ] }, { "local": [ { "group": { "id": CONTRACTOR_GROUP_ID } }, { "user": { "name": "{0}" } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "any_one_of": [ "Contractor", "SubContractor" ] }, { "type": "FirstName", "any_one_of": [ "Jill" ] } ] } ] } # Mapping summary: # orgPersonType Admin or Big Cheese -> name {0} {1} email {2} and group 0cd5e9 # orgPersonType Customer -> user name {0} email {1} # orgPersonType Test and email ^@example.com$ -> group 123 and xyz MAPPING_LARGE = { "rules": [ { "local": [ { "user": { "name": "{0} {1}", "email": "{2}" }, "group": { "id": EMPLOYEE_GROUP_ID } } ], "remote": [ { "type": "FirstName" }, { "type": "LastName" }, { "type": "Email" }, { "type": "orgPersonType", "any_one_of": [ "Admin", "Big Cheese" ] } ] }, { "local": [ { "user": { "name": "{0}", "email": "{1}" } } ], "remote": [ { "type": "UserName" }, { "type": "Email" }, { "type": "orgPersonType", "not_any_of": [ "Admin", "Employee", "Contractor", "Tester" ] } ] }, { "local": [ { "group": { "id": TESTER_GROUP_ID } }, { "group": { "id": DEVELOPER_GROUP_ID } }, { "user": { "name": "{0}" } } ], "remote": [ { "type": "UserName" 
}, { "type": "orgPersonType", "any_one_of": [ "Tester" ] }, { "type": "Email", "any_one_of": [ ".*@example.com$" ], "regex": True } ] } ] } MAPPING_BAD_REQ = { "rules": [ { "local": [ { "user": "name" } ], "remote": [ { "type": "UserName", "bad_requirement": [ "Young" ] } ] } ] } MAPPING_BAD_VALUE = { "rules": [ { "local": [ { "user": "name" } ], "remote": [ { "type": "UserName", "any_one_of": "should_be_list" } ] } ] } MAPPING_NO_RULES = { 'rules': [] } MAPPING_NO_REMOTE = { "rules": [ { "local": [ { "user": "name" } ], "remote": [] } ] } MAPPING_MISSING_LOCAL = { "rules": [ { "remote": [ { "type": "UserName", "any_one_of": "should_be_list" } ] } ] } MAPPING_WRONG_TYPE = { "rules": [ { "local": [ { "user": "{1}" } ], "remote": [ { "not_type": "UserName" } ] } ] } MAPPING_MISSING_TYPE = { "rules": [ { "local": [ { "user": "{1}" } ], "remote": [ {} ] } ] } MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF = { "rules": [ { "local": [ { "group": { "id": "0cd5e9" } }, { "user": { "name": "{0}" } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "not_any_of": [ "SubContractor" ], "invalid_type": "xyz" } ] } ] } MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF = { "rules": [ { "local": [ { "group": { "id": "0cd5e9" } }, { "user": { "name": "{0}" } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "any_one_of": [ "SubContractor" ], "invalid_type": "xyz" } ] } ] } MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE = { "rules": [ { "local": [ { "group": { "id": "0cd5e9" } }, { "user": { "name": "{0}" } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "invalid_type": "xyz" } ] } ] } MAPPING_EXTRA_RULES_PROPS = { "rules": [ { "local": [ { "group": { "id": "0cd5e9" } }, { "user": { "name": "{0}" } } ], "invalid_type": { "id": "xyz", }, "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "not_any_of": [ "SubContractor" ] } ] } ] } MAPPING_TESTER_REGEX = { "rules": [ { "local": [ { "user": { "name": "{0}", } } ], "remote": [ { "type": "UserName" } ] 
}, { "local": [ { "group": { "id": TESTER_GROUP_ID } } ], "remote": [ { "type": "orgPersonType", "any_one_of": [ ".*Tester*" ], "regex": True } ] } ] } MAPPING_DIRECT_MAPPING_THROUGH_KEYWORD = { "rules": [ { "local": [ { "user": "{0}" }, { "group": TESTER_GROUP_ID } ], "remote": [ { "type": "UserName", "any_one_of": [ "bwilliams" ] } ] } ] } MAPPING_DEVELOPER_REGEX = { "rules": [ { "local": [ { "user": { "name": "{0}", }, "group": { "id": DEVELOPER_GROUP_ID } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "any_one_of": [ "Developer" ], }, { "type": "Email", "not_any_of": [ ".*@example.org$" ], "regex": True } ] } ] } MAPPING_GROUP_NAMES = { "rules": [ { "local": [ { "user": { "name": "{0}", } } ], "remote": [ { "type": "UserName" } ] }, { "local": [ { "group": { "name": DEVELOPER_GROUP_NAME, "domain": { "name": DEVELOPER_GROUP_DOMAIN_NAME } } } ], "remote": [ { "type": "orgPersonType", "any_one_of": [ "Employee" ], } ] }, { "local": [ { "group": { "name": TESTER_GROUP_NAME, "domain": { "id": DEVELOPER_GROUP_DOMAIN_ID } } } ], "remote": [ { "type": "orgPersonType", "any_one_of": [ "BuildingX" ] } ] }, ] } MAPPING_EPHEMERAL_USER = { "rules": [ { "local": [ { "user": { "name": "{0}", "domain": { "id": FEDERATED_DOMAIN }, "type": "ephemeral" } } ], "remote": [ { "type": "UserName" }, { "type": "UserName", "any_one_of": [ "tbo" ] } ] } ] } MAPPING_GROUPS_WHITELIST = { "rules": [ { "remote": [ { "type": "orgPersonType", "whitelist": [ "Developer", "Contractor" ] }, { "type": "UserName" } ], "local": [ { "groups": "{0}", "domain": { "id": DEVELOPER_GROUP_DOMAIN_ID } }, { "user": { "name": "{1}" } } ] } ] } MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN = { "rules": [ { "local": [ { "user": { "name": "{0}", "domain": { "id": LOCAL_DOMAIN }, "type": "ephemeral" } } ], "remote": [ { "type": "UserName" }, { "type": "UserName", "any_one_of": [ "jsmith" ] } ] } ] } MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN = { "rules": [ { "remote": [ { "type": "orgPersonType", 
"whitelist": [ "Developer", "Contractor" ] }, ], "local": [ { "groups": "{0}", } ] } ] } MAPPING_LOCAL_USER_LOCAL_DOMAIN = { "rules": [ { "local": [ { "user": { "name": "{0}", "domain": { "id": LOCAL_DOMAIN }, "type": "local" } } ], "remote": [ { "type": "UserName" }, { "type": "UserName", "any_one_of": [ "jsmith" ] } ] } ] } MAPPING_GROUPS_BLACKLIST_MULTIPLES = { "rules": [ { "remote": [ { "type": "orgPersonType", "blacklist": [ "Developer", "Manager" ] }, { "type": "Thing" # this could be variable length! }, { "type": "UserName" }, ], "local": [ { "groups": "{0}", "domain": { "id": DEVELOPER_GROUP_DOMAIN_ID } }, { "user": { "name": "{2}", } } ] } ] } MAPPING_GROUPS_BLACKLIST = { "rules": [ { "remote": [ { "type": "orgPersonType", "blacklist": [ "Developer", "Manager" ] }, { "type": "UserName" } ], "local": [ { "groups": "{0}", "domain": { "id": DEVELOPER_GROUP_DOMAIN_ID } }, { "user": { "name": "{1}" } } ] } ] } # Exercise all possibilities of user identification. Values are hardcoded on # purpose. 
MAPPING_USER_IDS = { "rules": [ { "local": [ { "user": { "name": "{0}" } } ], "remote": [ { "type": "UserName" }, { "type": "UserName", "any_one_of": [ "jsmith" ] } ] }, { "local": [ { "user": { "name": "{0}", "id": "abc123@example.com", "domain": { "id": "federated" } } } ], "remote": [ { "type": "UserName" }, { "type": "UserName", "any_one_of": [ "tbo" ] } ] }, { "local": [ { "user": { "id": "{0}" } } ], "remote": [ { "type": "UserName" }, { "type": "UserName", "any_one_of": [ "bob" ] } ] }, { "local": [ { "user": { "id": "abc123@example.com", "name": "{0}", "domain": { "id": "federated" } } } ], "remote": [ { "type": "UserName" }, { "type": "UserName", "any_one_of": [ "bwilliams" ] } ] } ] } MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN = { "rules": [ { "remote": [ { "type": "orgPersonType", "blacklist": [ "Developer", "Manager" ] }, ], "local": [ { "groups": "{0}", }, ] } ] } MAPPING_GROUPS_WHITELIST_AND_BLACKLIST = { "rules": [ { "remote": [ { "type": "orgPersonType", "blacklist": [ "Employee" ], "whitelist": [ "Contractor" ] }, ], "local": [ { "groups": "{0}", "domain": { "id": DEVELOPER_GROUP_DOMAIN_ID } }, ] } ] } # Mapping used by tokenless test cases, it maps the user_name # and domain_name. MAPPING_WITH_USERNAME_AND_DOMAINNAME = { 'rules': [ { 'local': [ { 'user': { 'name': '{0}', 'domain': { 'name': '{1}' }, 'type': 'local' } } ], 'remote': [ { 'type': 'SSL_CLIENT_USER_NAME' }, { 'type': 'SSL_CLIENT_DOMAIN_NAME' } ] } ] } # Mapping used by tokenless test cases, it maps the user_id # and domain_name. MAPPING_WITH_USERID_AND_DOMAINNAME = { 'rules': [ { 'local': [ { 'user': { 'id': '{0}', 'domain': { 'name': '{1}' }, 'type': 'local' } } ], 'remote': [ { 'type': 'SSL_CLIENT_USER_ID' }, { 'type': 'SSL_CLIENT_DOMAIN_NAME' } ] } ] } # Mapping used by tokenless test cases, it maps the user_name # and domain_id. 
MAPPING_WITH_USERNAME_AND_DOMAINID = { 'rules': [ { 'local': [ { 'user': { 'name': '{0}', 'domain': { 'id': '{1}' }, 'type': 'local' } } ], 'remote': [ { 'type': 'SSL_CLIENT_USER_NAME' }, { 'type': 'SSL_CLIENT_DOMAIN_ID' } ] } ] } # Mapping used by tokenless test cases, it maps the user_id # and domain_id. MAPPING_WITH_USERID_AND_DOMAINID = { 'rules': [ { 'local': [ { 'user': { 'id': '{0}', 'domain': { 'id': '{1}' }, 'type': 'local' } } ], 'remote': [ { 'type': 'SSL_CLIENT_USER_ID' }, { 'type': 'SSL_CLIENT_DOMAIN_ID' } ] } ] } # Mapping used by tokenless test cases, it maps the domain_id only. MAPPING_WITH_DOMAINID_ONLY = { 'rules': [ { 'local': [ { 'user': { 'domain': { 'id': '{0}' }, 'type': 'local' } } ], 'remote': [ { 'type': 'SSL_CLIENT_DOMAIN_ID' } ] } ] } MAPPING_GROUPS_IDS_WHITELIST = { "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "group_ids": "{1}" }, { "group": { "id": "{2}" } } ], "remote": [ { "type": "name" }, { "type": "group_ids", "whitelist": [ "abc123", "ghi789", "321cba" ] }, { "type": "group" } ] } ] } MAPPING_GROUPS_IDS_BLACKLIST = { "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "group_ids": "{1}" }, { "group": { "id": "{2}" } } ], "remote": [ { "type": "name" }, { "type": "group_ids", "blacklist": [ "def456" ] }, { "type": "group" } ] } ] } # Mapping used by tokenless test cases, it maps the domain_name only. MAPPING_WITH_DOMAINNAME_ONLY = { 'rules': [ { 'local': [ { 'user': { 'domain': { 'name': '{0}' }, 'type': 'local' } } ], 'remote': [ { 'type': 'SSL_CLIENT_DOMAIN_NAME' } ] } ] } # Mapping used by tokenless test cases, it maps the user_name only. MAPPING_WITH_USERNAME_ONLY = { 'rules': [ { 'local': [ { 'user': { 'name': '{0}', 'type': 'local' } } ], 'remote': [ { 'type': 'SSL_CLIENT_USER_NAME' } ] } ] } # Mapping used by tokenless test cases, it maps the user_id only. 
MAPPING_WITH_USERID_ONLY = { 'rules': [ { 'local': [ { 'user': { 'id': '{0}', 'type': 'local' } } ], 'remote': [ { 'type': 'SSL_CLIENT_USER_ID' } ] } ] } MAPPING_FOR_EPHEMERAL_USER = { 'rules': [ { 'local': [ { 'user': { 'name': '{0}', 'type': 'ephemeral' }, 'group': { 'id': 'dummy' } } ], 'remote': [ { 'type': 'SSL_CLIENT_USER_NAME' } ] } ] } MAPPING_FOR_DEFAULT_EPHEMERAL_USER = { 'rules': [ { 'local': [ { 'user': { 'name': '{0}' }, 'group': { 'id': 'dummy' } } ], 'remote': [ { 'type': 'SSL_CLIENT_USER_NAME' } ] } ] } MAPPING_GROUPS_WHITELIST_PASS_THROUGH = { "rules": [ { "remote": [ { "type": "UserName" } ], "local": [ { "user": { "name": "{0}", "domain": { "id": DEVELOPER_GROUP_DOMAIN_ID } } } ] }, { "remote": [ { "type": "orgPersonType", "whitelist": ['Developer'] } ], "local": [ { "groups": "{0}", "domain": { "id": DEVELOPER_GROUP_DOMAIN_ID } } ] } ] } MAPPING_BAD_LOCAL_SETUP = { "rules": [ { "local": [ { "user": { "name": "{0}", "domain": {"id": "default"} }, "whatisthis": "local" } ], "remote": [ { "type": "UserName" } ] } ] } EMPLOYEE_ASSERTION = { 'Email': 'tim@example.com', 'UserName': 'tbo', 'FirstName': 'Tim', 'LastName': 'Bo', 'orgPersonType': 'Employee;BuildingX' } EMPLOYEE_ASSERTION_MULTIPLE_GROUPS = { 'Email': 'tim@example.com', 'UserName': 'tbo', 'FirstName': 'Tim', 'LastName': 'Bo', 'orgPersonType': 'Developer;Manager;Contractor', 'Thing': 'yes!;maybe!;no!!' 
} EMPLOYEE_ASSERTION_PREFIXED = { 'PREFIX_Email': 'tim@example.com', 'PREFIX_UserName': 'tbo', 'PREFIX_FirstName': 'Tim', 'PREFIX_LastName': 'Bo', 'PREFIX_orgPersonType': 'SuperEmployee;BuildingX' } CONTRACTOR_ASSERTION = { 'Email': 'jill@example.com', 'UserName': 'jsmith', 'FirstName': 'Jill', 'LastName': 'Smith', 'orgPersonType': 'Contractor;Non-Dev' } ADMIN_ASSERTION = { 'Email': 'bob@example.com', 'UserName': 'bob', 'FirstName': 'Bob', 'LastName': 'Thompson', 'orgPersonType': 'Admin;Chief' } CUSTOMER_ASSERTION = { 'Email': 'beth@example.com', 'UserName': 'bwilliams', 'FirstName': 'Beth', 'LastName': 'Williams', 'orgPersonType': 'Customer' } ANOTHER_CUSTOMER_ASSERTION = { 'Email': 'mark@example.com', 'UserName': 'markcol', 'FirstName': 'Mark', 'LastName': 'Collins', 'orgPersonType': 'Managers;CEO;CTO' } TESTER_ASSERTION = { 'Email': 'testacct@example.com', 'UserName': 'testacct', 'FirstName': 'Test', 'LastName': 'Account', 'orgPersonType': 'MadeupGroup;Tester;GroupX' } ANOTHER_TESTER_ASSERTION = { 'Email': 'testacct@example.com', 'UserName': 'IamTester' } BAD_TESTER_ASSERTION = { 'Email': 'eviltester@example.org', 'UserName': 'Evil', 'FirstName': 'Test', 'LastName': 'Account', 'orgPersonType': 'Tester' } BAD_DEVELOPER_ASSERTION = { 'Email': 'evildeveloper@example.org', 'UserName': 'Evil', 'FirstName': 'Develop', 'LastName': 'Account', 'orgPersonType': 'Developer' } MALFORMED_TESTER_ASSERTION = { 'Email': 'testacct@example.com', 'UserName': 'testacct', 'FirstName': 'Test', 'LastName': 'Account', 'orgPersonType': 'Tester', 'object': object(), 'dictionary': dict(zip('teststring', range(10))), 'tuple': tuple(range(5)) } DEVELOPER_ASSERTION = { 'Email': 'developacct@example.com', 'UserName': 'developacct', 'FirstName': 'Develop', 'LastName': 'Account', 'orgPersonType': 'Developer' } CONTRACTOR_MALFORMED_ASSERTION = { 'UserName': 'user', 'FirstName': object(), 'orgPersonType': 'Contractor' } LOCAL_USER_ASSERTION = { 'UserName': 'marek', 'UserType': 'random' } 
ANOTHER_LOCAL_USER_ASSERTION = { 'UserName': 'marek', 'Position': 'DirectorGeneral' } UNMATCHED_GROUP_ASSERTION = { 'REMOTE_USER': 'Any Momoose', 'REMOTE_USER_GROUPS': 'EXISTS;NO_EXISTS' } GROUP_IDS_ASSERTION = { 'name': 'opilotte', 'group_ids': 'abc123;def456;ghi789', 'group': 'klm012' } GROUP_IDS_ASSERTION_ONLY_ONE_GROUP = { 'name': 'opilotte', 'group_ids': '321cba', 'group': '210mlk' } UNICODE_NAME_ASSERTION = { 'PFX_Email': 'jon@example.com', 'PFX_UserName': 'jonkare', 'PFX_FirstName': 'Jon Kåre', 'PFX_LastName': 'Hellån', 'PFX_orgPersonType': 'Admin;Chief' } MAPPING_UNICODE = { "rules": [ { "local": [ { "user": { "name": "{0} {1}", "email": "{2}" }, "group": { "id": EMPLOYEE_GROUP_ID } } ], "remote": [ { "type": "PFX_FirstName" }, { "type": "PFX_LastName" }, { "type": "PFX_Email" }, { "type": "PFX_orgPersonType", "any_one_of": [ "Admin", "Big Cheese" ] } ] }, ], } keystone-9.0.0/keystone/tests/unit/identity/0000775000567000056710000000000012701407246022340 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/identity/test_controllers.py0000664000567000056710000000443612701407102026315 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from oslo_config import cfg from keystone import exception from keystone.identity import controllers from keystone.tests import unit from keystone.tests.unit.ksfixtures import database CONF = cfg.CONF _ADMIN_CONTEXT = {'is_admin': True, 'query_string': {}} class UserTestCaseNoDefaultDomain(unit.TestCase): def setUp(self): super(UserTestCaseNoDefaultDomain, self).setUp() self.useFixture(database.Database()) self.load_backends() self.user_controller = controllers.User() def test_setup(self): # Other tests in this class assume there's no default domain, so make # sure the setUp worked as expected. self.assertRaises( exception.DomainNotFound, self.resource_api.get_domain, CONF.identity.default_domain_id) def test_get_users(self): # When list_users is done and there's no default domain, the result is # an empty list. res = self.user_controller.get_users(_ADMIN_CONTEXT) self.assertEqual([], res['users']) def test_get_user_by_name(self): # When get_user_by_name is done and there's no default domain, the # result is 404 Not Found user_name = uuid.uuid4().hex self.assertRaises( exception.UserNotFound, self.user_controller.get_user_by_name, _ADMIN_CONTEXT, user_name) def test_create_user(self): # When a user is created using the v2 controller and there's no default # domain, it doesn't fail with can't find domain (a default domain is # created) user = {'name': uuid.uuid4().hex} self.user_controller.create_user(_ADMIN_CONTEXT, user) # If the above doesn't fail then this is successful. keystone-9.0.0/keystone/tests/unit/identity/__init__.py0000664000567000056710000000000012701407102024426 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/identity/test_core.py0000664000567000056710000001645612701407102024704 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for core identity behavior.""" import itertools import os import uuid import mock from oslo_config import cfg from oslo_config import fixture as config_fixture from keystone import exception from keystone import identity from keystone.tests import unit from keystone.tests.unit.ksfixtures import database CONF = cfg.CONF class TestDomainConfigs(unit.BaseTestCase): def setUp(self): super(TestDomainConfigs, self).setUp() self.addCleanup(CONF.reset) self.tmp_dir = unit.dirs.tmp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.config_fixture.config(domain_config_dir=self.tmp_dir, group='identity') def test_config_for_nonexistent_domain(self): """Having a config for a non-existent domain will be ignored. There are no assertions in this test because there are no side effects. If there is a config file for a domain that does not exist it should be ignored. """ domain_id = uuid.uuid4().hex domain_config_filename = os.path.join(self.tmp_dir, 'keystone.%s.conf' % domain_id) self.addCleanup(lambda: os.remove(domain_config_filename)) with open(domain_config_filename, 'w'): """Write an empty config file.""" e = exception.DomainNotFound(domain_id=domain_id) mock_assignment_api = mock.Mock() mock_assignment_api.get_domain_by_name.side_effect = e domain_config = identity.DomainConfigs() fake_standard_driver = None domain_config.setup_domain_drivers(fake_standard_driver, mock_assignment_api) def test_config_for_dot_name_domain(self): # Ensure we can get the right domain name which has dots within it # from filename. 
domain_config_filename = os.path.join(self.tmp_dir, 'keystone.abc.def.com.conf') with open(domain_config_filename, 'w'): """Write an empty config file.""" self.addCleanup(os.remove, domain_config_filename) with mock.patch.object(identity.DomainConfigs, '_load_config_from_file') as mock_load_config: domain_config = identity.DomainConfigs() fake_assignment_api = None fake_standard_driver = None domain_config.setup_domain_drivers(fake_standard_driver, fake_assignment_api) mock_load_config.assert_called_once_with(fake_assignment_api, [domain_config_filename], 'abc.def.com') def test_config_for_multiple_sql_backend(self): domains_config = identity.DomainConfigs() # Create the right sequence of is_sql in the drivers being # requested to expose the bug, which is that a False setting # means it forgets previous True settings. drivers = [] files = [] for idx, is_sql in enumerate((True, False, True)): drv = mock.Mock(is_sql=is_sql) drivers.append(drv) name = 'dummy.{0}'.format(idx) files.append(''.join(( identity.DOMAIN_CONF_FHEAD, name, identity.DOMAIN_CONF_FTAIL))) walk_fake = lambda *a, **kwa: ( ('/fake/keystone/domains/config', [], files), ) generic_driver = mock.Mock(is_sql=False) assignment_api = mock.Mock() id_factory = itertools.count() assignment_api.get_domain_by_name.side_effect = ( lambda name: {'id': next(id_factory), '_': 'fake_domain'}) load_driver_mock = mock.Mock(side_effect=drivers) with mock.patch.object(os, 'walk', walk_fake): with mock.patch.object(identity.cfg, 'ConfigOpts'): with mock.patch.object(domains_config, '_load_driver', load_driver_mock): self.assertRaises( exception.MultipleSQLDriversInConfig, domains_config.setup_domain_drivers, generic_driver, assignment_api) self.assertEqual(3, load_driver_mock.call_count) class TestDatabaseDomainConfigs(unit.TestCase): def setUp(self): super(TestDatabaseDomainConfigs, self).setUp() self.useFixture(database.Database()) self.load_backends() def test_domain_config_in_database_disabled_by_default(self): 
self.assertFalse(CONF.identity.domain_configurations_from_database) def test_loading_config_from_database(self): self.config_fixture.config(domain_configurations_from_database=True, group='identity') domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) # Override two config options for our domain conf = {'ldap': {'url': uuid.uuid4().hex, 'suffix': uuid.uuid4().hex, 'use_tls': 'True'}, 'identity': { 'driver': 'ldap'}} self.domain_config_api.create_config(domain['id'], conf) fake_standard_driver = None domain_config = identity.DomainConfigs() domain_config.setup_domain_drivers(fake_standard_driver, self.resource_api) # Make sure our two overrides are in place, and others are not affected res = domain_config.get_domain_conf(domain['id']) self.assertEqual(conf['ldap']['url'], res.ldap.url) self.assertEqual(conf['ldap']['suffix'], res.ldap.suffix) self.assertEqual(CONF.ldap.query_scope, res.ldap.query_scope) # Make sure the override is not changing the type of the config value use_tls_type = type(CONF.ldap.use_tls) self.assertEqual(use_tls_type(conf['ldap']['use_tls']), res.ldap.use_tls) # Now turn off using database domain configuration and check that the # default config file values are now seen instead of the overrides. 
CONF.set_override('domain_configurations_from_database', False, 'identity', enforce_type=True) domain_config = identity.DomainConfigs() domain_config.setup_domain_drivers(fake_standard_driver, self.resource_api) res = domain_config.get_domain_conf(domain['id']) self.assertEqual(CONF.ldap.url, res.ldap.url) self.assertEqual(CONF.ldap.suffix, res.ldap.suffix) self.assertEqual(CONF.ldap.use_tls, res.ldap.use_tls) self.assertEqual(CONF.ldap.query_scope, res.ldap.query_scope) keystone-9.0.0/keystone/tests/unit/identity/test_backends.py0000664000567000056710000016164612701407105025533 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import mock from oslo_config import cfg from six.moves import range from testtools import matchers from keystone.common import driver_hints from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import filtering CONF = cfg.CONF class IdentityTests(object): def _get_domain_fixture(self): domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) return domain def _set_domain_scope(self, domain_id): # We only provide a domain scope if we have multiple drivers if CONF.identity.domain_specific_drivers_enabled: return domain_id def test_authenticate_bad_user(self): self.assertRaises(AssertionError, self.identity_api.authenticate, context={}, user_id=uuid.uuid4().hex, password=self.user_foo['password']) def test_authenticate_bad_password(self): self.assertRaises(AssertionError, self.identity_api.authenticate, context={}, user_id=self.user_foo['id'], password=uuid.uuid4().hex) def test_authenticate(self): user_ref = self.identity_api.authenticate( context={}, user_id=self.user_sna['id'], password=self.user_sna['password']) # NOTE(termie): the password field is left in user_sna to make # it easier to authenticate in tests, but should # not be returned by the api self.user_sna.pop('password') self.user_sna['enabled'] = True self.assertDictEqual(self.user_sna, user_ref) def test_authenticate_and_get_roles_no_metadata(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) # Remove user id. It is ignored by create_user() and will break the # subset test below. 
del user['id'] new_user = self.identity_api.create_user(user) self.assignment_api.add_user_to_project(self.tenant_baz['id'], new_user['id']) user_ref = self.identity_api.authenticate( context={}, user_id=new_user['id'], password=user['password']) self.assertNotIn('password', user_ref) # NOTE(termie): the password field is left in user_sna to make # it easier to authenticate in tests, but should # not be returned by the api user.pop('password') self.assertDictContainsSubset(user, user_ref) role_list = self.assignment_api.get_roles_for_user_and_project( new_user['id'], self.tenant_baz['id']) self.assertEqual(1, len(role_list)) self.assertIn(CONF.member_role_id, role_list) def test_authenticate_if_no_password_set(self): id_ = uuid.uuid4().hex user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) self.identity_api.create_user(user) self.assertRaises(AssertionError, self.identity_api.authenticate, context={}, user_id=id_, password='password') def test_create_unicode_user_name(self): unicode_name = u'name \u540d\u5b57' user = unit.new_user_ref(name=unicode_name, domain_id=CONF.identity.default_domain_id) ref = self.identity_api.create_user(user) self.assertEqual(unicode_name, ref['name']) def test_get_user(self): user_ref = self.identity_api.get_user(self.user_foo['id']) # NOTE(termie): the password field is left in user_foo to make # it easier to authenticate in tests, but should # not be returned by the api self.user_foo.pop('password') self.assertDictEqual(self.user_foo, user_ref) @unit.skip_if_cache_disabled('identity') def test_cache_layer_get_user(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) self.identity_api.create_user(user) ref = self.identity_api.get_user_by_name(user['name'], user['domain_id']) # cache the result. 
self.identity_api.get_user(ref['id']) # delete bypassing identity api domain_id, driver, entity_id = ( self.identity_api._get_domain_driver_and_entity_id(ref['id'])) driver.delete_user(entity_id) self.assertDictEqual(ref, self.identity_api.get_user(ref['id'])) self.identity_api.get_user.invalidate(self.identity_api, ref['id']) self.assertRaises(exception.UserNotFound, self.identity_api.get_user, ref['id']) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) ref = self.identity_api.get_user_by_name(user['name'], user['domain_id']) user['description'] = uuid.uuid4().hex # cache the result. self.identity_api.get_user(ref['id']) # update using identity api and get back updated user. user_updated = self.identity_api.update_user(ref['id'], user) self.assertDictContainsSubset(self.identity_api.get_user(ref['id']), user_updated) self.assertDictContainsSubset( self.identity_api.get_user_by_name(ref['name'], ref['domain_id']), user_updated) def test_get_user_returns_not_found(self): self.assertRaises(exception.UserNotFound, self.identity_api.get_user, uuid.uuid4().hex) def test_get_user_by_name(self): user_ref = self.identity_api.get_user_by_name( self.user_foo['name'], CONF.identity.default_domain_id) # NOTE(termie): the password field is left in user_foo to make # it easier to authenticate in tests, but should # not be returned by the api self.user_foo.pop('password') self.assertDictEqual(self.user_foo, user_ref) @unit.skip_if_cache_disabled('identity') def test_cache_layer_get_user_by_name(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) self.identity_api.create_user(user) ref = self.identity_api.get_user_by_name(user['name'], user['domain_id']) # delete bypassing the identity api. 
domain_id, driver, entity_id = ( self.identity_api._get_domain_driver_and_entity_id(ref['id'])) driver.delete_user(entity_id) self.assertDictEqual(ref, self.identity_api.get_user_by_name( user['name'], CONF.identity.default_domain_id)) self.identity_api.get_user_by_name.invalidate( self.identity_api, user['name'], CONF.identity.default_domain_id) self.assertRaises(exception.UserNotFound, self.identity_api.get_user_by_name, user['name'], CONF.identity.default_domain_id) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) ref = self.identity_api.get_user_by_name(user['name'], user['domain_id']) user['description'] = uuid.uuid4().hex user_updated = self.identity_api.update_user(ref['id'], user) self.assertDictContainsSubset(self.identity_api.get_user(ref['id']), user_updated) self.assertDictContainsSubset( self.identity_api.get_user_by_name(ref['name'], ref['domain_id']), user_updated) def test_get_user_by_name_returns_not_found(self): self.assertRaises(exception.UserNotFound, self.identity_api.get_user_by_name, uuid.uuid4().hex, CONF.identity.default_domain_id) def test_create_duplicate_user_name_fails(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) self.assertRaises(exception.Conflict, self.identity_api.create_user, user) def test_create_duplicate_user_name_in_different_domains(self): new_domain = unit.new_domain_ref() self.resource_api.create_domain(new_domain['id'], new_domain) user1 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user2 = unit.new_user_ref(name=user1['name'], domain_id=new_domain['id']) self.identity_api.create_user(user1) self.identity_api.create_user(user2) def test_move_user_between_domains(self): domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) user = 
unit.new_user_ref(domain_id=domain1['id']) user = self.identity_api.create_user(user) user['domain_id'] = domain2['id'] # Update the user asserting that a deprecation warning is emitted with mock.patch( 'oslo_log.versionutils.report_deprecated_feature') as mock_dep: self.identity_api.update_user(user['id'], user) self.assertTrue(mock_dep.called) updated_user_ref = self.identity_api.get_user(user['id']) self.assertEqual(domain2['id'], updated_user_ref['domain_id']) def test_move_user_between_domains_with_clashing_names_fails(self): domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) # First, create a user in domain1 user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = self.identity_api.create_user(user1) # Now create a user in domain2 with a potentially clashing # name - which should work since we have domain separation user2 = unit.new_user_ref(name=user1['name'], domain_id=domain2['id']) user2 = self.identity_api.create_user(user2) # Now try and move user1 into the 2nd domain - which should # fail since the names clash user1['domain_id'] = domain2['id'] self.assertRaises(exception.Conflict, self.identity_api.update_user, user1['id'], user1) def test_rename_duplicate_user_name_fails(self): user1 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user2 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) self.identity_api.create_user(user1) user2 = self.identity_api.create_user(user2) user2['name'] = user1['name'] self.assertRaises(exception.Conflict, self.identity_api.update_user, user2['id'], user2) def test_update_user_id_fails(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) original_id = user['id'] user['id'] = 'fake2' self.assertRaises(exception.ValidationError, self.identity_api.update_user, original_id, user) user_ref = 
self.identity_api.get_user(original_id) self.assertEqual(original_id, user_ref['id']) self.assertRaises(exception.UserNotFound, self.identity_api.get_user, 'fake2') def test_delete_user_with_group_project_domain_links(self): role1 = unit.new_role_ref() self.role_api.create_role(role1['id'], role1) domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) project1 = unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project1['id'], project1) user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = self.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain1['id']) group1 = self.identity_api.create_group(group1) self.assignment_api.create_grant(user_id=user1['id'], project_id=project1['id'], role_id=role1['id']) self.assignment_api.create_grant(user_id=user1['id'], domain_id=domain1['id'], role_id=role1['id']) self.identity_api.add_user_to_group(user_id=user1['id'], group_id=group1['id']) roles_ref = self.assignment_api.list_grants( user_id=user1['id'], project_id=project1['id']) self.assertEqual(1, len(roles_ref)) roles_ref = self.assignment_api.list_grants( user_id=user1['id'], domain_id=domain1['id']) self.assertEqual(1, len(roles_ref)) self.identity_api.check_user_in_group( user_id=user1['id'], group_id=group1['id']) self.identity_api.delete_user(user1['id']) self.assertRaises(exception.NotFound, self.identity_api.check_user_in_group, user1['id'], group1['id']) def test_delete_group_with_user_project_domain_links(self): role1 = unit.new_role_ref() self.role_api.create_role(role1['id'], role1) domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) project1 = unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project1['id'], project1) user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = self.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain1['id']) group1 = 
self.identity_api.create_group(group1) self.assignment_api.create_grant(group_id=group1['id'], project_id=project1['id'], role_id=role1['id']) self.assignment_api.create_grant(group_id=group1['id'], domain_id=domain1['id'], role_id=role1['id']) self.identity_api.add_user_to_group(user_id=user1['id'], group_id=group1['id']) roles_ref = self.assignment_api.list_grants( group_id=group1['id'], project_id=project1['id']) self.assertEqual(1, len(roles_ref)) roles_ref = self.assignment_api.list_grants( group_id=group1['id'], domain_id=domain1['id']) self.assertEqual(1, len(roles_ref)) self.identity_api.check_user_in_group( user_id=user1['id'], group_id=group1['id']) self.identity_api.delete_group(group1['id']) self.identity_api.get_user(user1['id']) def test_update_user_returns_not_found(self): user_id = uuid.uuid4().hex self.assertRaises(exception.UserNotFound, self.identity_api.update_user, user_id, {'id': user_id, 'domain_id': CONF.identity.default_domain_id}) def test_delete_user_returns_not_found(self): self.assertRaises(exception.UserNotFound, self.identity_api.delete_user, uuid.uuid4().hex) def test_create_user_long_name_fails(self): user = unit.new_user_ref(name='a' * 256, domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ValidationError, self.identity_api.create_user, user) def test_create_user_blank_name_fails(self): user = unit.new_user_ref(name='', domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ValidationError, self.identity_api.create_user, user) def test_create_user_missed_password(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) self.identity_api.get_user(user['id']) # Make sure the user is not allowed to login # with a password that is empty string or None self.assertRaises(AssertionError, self.identity_api.authenticate, context={}, user_id=user['id'], password='') self.assertRaises(AssertionError, self.identity_api.authenticate, context={}, 
user_id=user['id'], password=None) def test_create_user_none_password(self): user = unit.new_user_ref(password=None, domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) self.identity_api.get_user(user['id']) # Make sure the user is not allowed to login # with a password that is empty string or None self.assertRaises(AssertionError, self.identity_api.authenticate, context={}, user_id=user['id'], password='') self.assertRaises(AssertionError, self.identity_api.authenticate, context={}, user_id=user['id'], password=None) def test_create_user_invalid_name_fails(self): user = unit.new_user_ref(name=None, domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ValidationError, self.identity_api.create_user, user) user = unit.new_user_ref(name=123, domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ValidationError, self.identity_api.create_user, user) def test_create_user_invalid_enabled_type_string(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id, # invalid string value enabled='true') self.assertRaises(exception.ValidationError, self.identity_api.create_user, user) def test_update_user_long_name_fails(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) user['name'] = 'a' * 256 self.assertRaises(exception.ValidationError, self.identity_api.update_user, user['id'], user) def test_update_user_blank_name_fails(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) user['name'] = '' self.assertRaises(exception.ValidationError, self.identity_api.update_user, user['id'], user) def test_update_user_invalid_name_fails(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) user['name'] = None self.assertRaises(exception.ValidationError, self.identity_api.update_user, user['id'], user) user['name'] = 
123 self.assertRaises(exception.ValidationError, self.identity_api.update_user, user['id'], user) def test_list_users(self): users = self.identity_api.list_users( domain_scope=self._set_domain_scope( CONF.identity.default_domain_id)) self.assertEqual(len(default_fixtures.USERS), len(users)) user_ids = set(user['id'] for user in users) expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id'] for user in default_fixtures.USERS) for user_ref in users: self.assertNotIn('password', user_ref) self.assertEqual(expected_user_ids, user_ids) def test_list_groups(self): group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group2 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group1 = self.identity_api.create_group(group1) group2 = self.identity_api.create_group(group2) groups = self.identity_api.list_groups( domain_scope=self._set_domain_scope( CONF.identity.default_domain_id)) self.assertEqual(2, len(groups)) group_ids = [] for group in groups: group_ids.append(group.get('id')) self.assertIn(group1['id'], group_ids) self.assertIn(group2['id'], group_ids) def test_create_user_doesnt_modify_passed_in_dict(self): new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) original_user = new_user.copy() self.identity_api.create_user(new_user) self.assertDictEqual(original_user, new_user) def test_update_user_enable(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) user_ref = self.identity_api.get_user(user['id']) self.assertTrue(user_ref['enabled']) user['enabled'] = False self.identity_api.update_user(user['id'], user) user_ref = self.identity_api.get_user(user['id']) self.assertEqual(user['enabled'], user_ref['enabled']) # If not present, enabled field should not be updated del user['enabled'] self.identity_api.update_user(user['id'], user) user_ref = self.identity_api.get_user(user['id']) self.assertFalse(user_ref['enabled']) user['enabled'] = True 
self.identity_api.update_user(user['id'], user) user_ref = self.identity_api.get_user(user['id']) self.assertEqual(user['enabled'], user_ref['enabled']) del user['enabled'] self.identity_api.update_user(user['id'], user) user_ref = self.identity_api.get_user(user['id']) self.assertTrue(user_ref['enabled']) # Integers are valid Python's booleans. Explicitly test it. user['enabled'] = 0 self.identity_api.update_user(user['id'], user) user_ref = self.identity_api.get_user(user['id']) self.assertFalse(user_ref['enabled']) # Any integers other than 0 are interpreted as True user['enabled'] = -42 self.identity_api.update_user(user['id'], user) user_ref = self.identity_api.get_user(user['id']) # NOTE(breton): below, attribute `enabled` is explicitly tested to be # equal True. assertTrue should not be used, because it converts # the passed value to bool(). self.assertIs(user_ref['enabled'], True) def test_update_user_name(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) user_ref = self.identity_api.get_user(user['id']) self.assertEqual(user['name'], user_ref['name']) changed_name = user_ref['name'] + '_changed' user_ref['name'] = changed_name updated_user = self.identity_api.update_user(user_ref['id'], user_ref) # NOTE(dstanek): the SQL backend adds an 'extra' field containing a # dictionary of the extra fields in addition to the # fields in the object. 
For the details see: # SqlIdentity.test_update_project_returns_extra updated_user.pop('extra', None) self.assertDictEqual(user_ref, updated_user) user_ref = self.identity_api.get_user(user_ref['id']) self.assertEqual(changed_name, user_ref['name']) def test_update_user_enable_fails(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) user_ref = self.identity_api.get_user(user['id']) self.assertTrue(user_ref['enabled']) # Strings are not valid boolean values user['enabled'] = 'false' self.assertRaises(exception.ValidationError, self.identity_api.update_user, user['id'], user) def test_add_user_to_group(self): domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=domain['id']) new_group = self.identity_api.create_group(new_group) new_user = unit.new_user_ref(domain_id=domain['id']) new_user = self.identity_api.create_user(new_user) self.identity_api.add_user_to_group(new_user['id'], new_group['id']) groups = self.identity_api.list_groups_for_user(new_user['id']) found = False for x in groups: if (x['id'] == new_group['id']): found = True self.assertTrue(found) def test_add_user_to_group_returns_not_found(self): domain = self._get_domain_fixture() new_user = unit.new_user_ref(domain_id=domain['id']) new_user = self.identity_api.create_user(new_user) self.assertRaises(exception.GroupNotFound, self.identity_api.add_user_to_group, new_user['id'], uuid.uuid4().hex) new_group = unit.new_group_ref(domain_id=domain['id']) new_group = self.identity_api.create_group(new_group) self.assertRaises(exception.UserNotFound, self.identity_api.add_user_to_group, uuid.uuid4().hex, new_group['id']) self.assertRaises(exception.NotFound, self.identity_api.add_user_to_group, uuid.uuid4().hex, uuid.uuid4().hex) def test_check_user_in_group(self): domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=domain['id']) new_group = self.identity_api.create_group(new_group) new_user = 
unit.new_user_ref(domain_id=domain['id']) new_user = self.identity_api.create_user(new_user) self.identity_api.add_user_to_group(new_user['id'], new_group['id']) self.identity_api.check_user_in_group(new_user['id'], new_group['id']) def test_check_user_not_in_group(self): new_group = unit.new_group_ref( domain_id=CONF.identity.default_domain_id) new_group = self.identity_api.create_group(new_group) new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) new_user = self.identity_api.create_user(new_user) self.assertRaises(exception.NotFound, self.identity_api.check_user_in_group, new_user['id'], new_group['id']) def test_check_user_in_group_returns_not_found(self): new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) new_user = self.identity_api.create_user(new_user) new_group = unit.new_group_ref( domain_id=CONF.identity.default_domain_id) new_group = self.identity_api.create_group(new_group) self.assertRaises(exception.UserNotFound, self.identity_api.check_user_in_group, uuid.uuid4().hex, new_group['id']) self.assertRaises(exception.GroupNotFound, self.identity_api.check_user_in_group, new_user['id'], uuid.uuid4().hex) self.assertRaises(exception.NotFound, self.identity_api.check_user_in_group, uuid.uuid4().hex, uuid.uuid4().hex) def test_list_users_in_group(self): domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=domain['id']) new_group = self.identity_api.create_group(new_group) # Make sure we get an empty list back on a new group, not an error. user_refs = self.identity_api.list_users_in_group(new_group['id']) self.assertEqual([], user_refs) # Make sure we get the correct users back once they have been added # to the group. 
new_user = unit.new_user_ref(domain_id=domain['id']) new_user = self.identity_api.create_user(new_user) self.identity_api.add_user_to_group(new_user['id'], new_group['id']) user_refs = self.identity_api.list_users_in_group(new_group['id']) found = False for x in user_refs: if (x['id'] == new_user['id']): found = True self.assertNotIn('password', x) self.assertTrue(found) def test_list_users_in_group_returns_not_found(self): self.assertRaises(exception.GroupNotFound, self.identity_api.list_users_in_group, uuid.uuid4().hex) def test_list_groups_for_user(self): domain = self._get_domain_fixture() test_groups = [] test_users = [] GROUP_COUNT = 3 USER_COUNT = 2 for x in range(0, USER_COUNT): new_user = unit.new_user_ref(domain_id=domain['id']) new_user = self.identity_api.create_user(new_user) test_users.append(new_user) positive_user = test_users[0] negative_user = test_users[1] for x in range(0, USER_COUNT): group_refs = self.identity_api.list_groups_for_user( test_users[x]['id']) self.assertEqual(0, len(group_refs)) for x in range(0, GROUP_COUNT): before_count = x after_count = x + 1 new_group = unit.new_group_ref(domain_id=domain['id']) new_group = self.identity_api.create_group(new_group) test_groups.append(new_group) # add the user to the group and ensure that the # group count increases by one for each group_refs = self.identity_api.list_groups_for_user( positive_user['id']) self.assertEqual(before_count, len(group_refs)) self.identity_api.add_user_to_group( positive_user['id'], new_group['id']) group_refs = self.identity_api.list_groups_for_user( positive_user['id']) self.assertEqual(after_count, len(group_refs)) # Make sure the group count for the unrelated user did not change group_refs = self.identity_api.list_groups_for_user( negative_user['id']) self.assertEqual(0, len(group_refs)) # remove the user from each group and ensure that # the group count reduces by one for each for x in range(0, 3): before_count = GROUP_COUNT - x after_count = GROUP_COUNT - x - 1 
group_refs = self.identity_api.list_groups_for_user( positive_user['id']) self.assertEqual(before_count, len(group_refs)) self.identity_api.remove_user_from_group( positive_user['id'], test_groups[x]['id']) group_refs = self.identity_api.list_groups_for_user( positive_user['id']) self.assertEqual(after_count, len(group_refs)) # Make sure the group count for the unrelated user # did not change group_refs = self.identity_api.list_groups_for_user( negative_user['id']) self.assertEqual(0, len(group_refs)) def test_remove_user_from_group(self): domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=domain['id']) new_group = self.identity_api.create_group(new_group) new_user = unit.new_user_ref(domain_id=domain['id']) new_user = self.identity_api.create_user(new_user) self.identity_api.add_user_to_group(new_user['id'], new_group['id']) groups = self.identity_api.list_groups_for_user(new_user['id']) self.assertIn(new_group['id'], [x['id'] for x in groups]) self.identity_api.remove_user_from_group(new_user['id'], new_group['id']) groups = self.identity_api.list_groups_for_user(new_user['id']) self.assertNotIn(new_group['id'], [x['id'] for x in groups]) def test_remove_user_from_group_returns_not_found(self): domain = self._get_domain_fixture() new_user = unit.new_user_ref(domain_id=domain['id']) new_user = self.identity_api.create_user(new_user) new_group = unit.new_group_ref(domain_id=domain['id']) new_group = self.identity_api.create_group(new_group) self.assertRaises(exception.GroupNotFound, self.identity_api.remove_user_from_group, new_user['id'], uuid.uuid4().hex) self.assertRaises(exception.UserNotFound, self.identity_api.remove_user_from_group, uuid.uuid4().hex, new_group['id']) self.assertRaises(exception.NotFound, self.identity_api.remove_user_from_group, uuid.uuid4().hex, uuid.uuid4().hex) def test_group_crud(self): domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) group = 
unit.new_group_ref(domain_id=domain['id']) group = self.identity_api.create_group(group) group_ref = self.identity_api.get_group(group['id']) self.assertDictContainsSubset(group, group_ref) group['name'] = uuid.uuid4().hex self.identity_api.update_group(group['id'], group) group_ref = self.identity_api.get_group(group['id']) self.assertDictContainsSubset(group, group_ref) self.identity_api.delete_group(group['id']) self.assertRaises(exception.GroupNotFound, self.identity_api.get_group, group['id']) def test_get_group_by_name(self): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group_name = group['name'] group = self.identity_api.create_group(group) spoiler = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) self.identity_api.create_group(spoiler) group_ref = self.identity_api.get_group_by_name( group_name, CONF.identity.default_domain_id) self.assertDictEqual(group, group_ref) def test_get_group_by_name_returns_not_found(self): self.assertRaises(exception.GroupNotFound, self.identity_api.get_group_by_name, uuid.uuid4().hex, CONF.identity.default_domain_id) @unit.skip_if_cache_disabled('identity') def test_cache_layer_group_crud(self): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) # cache the result group_ref = self.identity_api.get_group(group['id']) # delete the group bypassing identity api. 
domain_id, driver, entity_id = ( self.identity_api._get_domain_driver_and_entity_id(group['id'])) driver.delete_group(entity_id) self.assertEqual(group_ref, self.identity_api.get_group(group['id'])) self.identity_api.get_group.invalidate(self.identity_api, group['id']) self.assertRaises(exception.GroupNotFound, self.identity_api.get_group, group['id']) group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) # cache the result self.identity_api.get_group(group['id']) group['name'] = uuid.uuid4().hex group_ref = self.identity_api.update_group(group['id'], group) # after updating through identity api, get updated group self.assertDictContainsSubset(self.identity_api.get_group(group['id']), group_ref) def test_create_duplicate_group_name_fails(self): group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group2 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id, name=group1['name']) group1 = self.identity_api.create_group(group1) self.assertRaises(exception.Conflict, self.identity_api.create_group, group2) def test_create_duplicate_group_name_in_different_domains(self): new_domain = unit.new_domain_ref() self.resource_api.create_domain(new_domain['id'], new_domain) group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group2 = unit.new_group_ref(domain_id=new_domain['id'], name=group1['name']) group1 = self.identity_api.create_group(group1) group2 = self.identity_api.create_group(group2) def test_move_group_between_domains(self): domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) group = unit.new_group_ref(domain_id=domain1['id']) group = self.identity_api.create_group(group) group['domain_id'] = domain2['id'] # Update the group asserting that a deprecation warning is emitted with mock.patch( 'oslo_log.versionutils.report_deprecated_feature') 
as mock_dep: self.identity_api.update_group(group['id'], group) self.assertTrue(mock_dep.called) updated_group_ref = self.identity_api.get_group(group['id']) self.assertEqual(domain2['id'], updated_group_ref['domain_id']) def test_move_group_between_domains_with_clashing_names_fails(self): domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) # First, create a group in domain1 group1 = unit.new_group_ref(domain_id=domain1['id']) group1 = self.identity_api.create_group(group1) # Now create a group in domain2 with a potentially clashing # name - which should work since we have domain separation group2 = unit.new_group_ref(name=group1['name'], domain_id=domain2['id']) group2 = self.identity_api.create_group(group2) # Now try and move group1 into the 2nd domain - which should # fail since the names clash group1['domain_id'] = domain2['id'] self.assertRaises(exception.Conflict, self.identity_api.update_group, group1['id'], group1) def test_user_crud(self): user_dict = unit.new_user_ref( domain_id=CONF.identity.default_domain_id) del user_dict['id'] user = self.identity_api.create_user(user_dict) user_ref = self.identity_api.get_user(user['id']) del user_dict['password'] user_ref_dict = {x: user_ref[x] for x in user_ref} self.assertDictContainsSubset(user_dict, user_ref_dict) user_dict['password'] = uuid.uuid4().hex self.identity_api.update_user(user['id'], user_dict) user_ref = self.identity_api.get_user(user['id']) del user_dict['password'] user_ref_dict = {x: user_ref[x] for x in user_ref} self.assertDictContainsSubset(user_dict, user_ref_dict) self.identity_api.delete_user(user['id']) self.assertRaises(exception.UserNotFound, self.identity_api.get_user, user['id']) def test_arbitrary_attributes_are_returned_from_create_user(self): attr_value = uuid.uuid4().hex user_data = unit.new_user_ref( domain_id=CONF.identity.default_domain_id, 
arbitrary_attr=attr_value) user = self.identity_api.create_user(user_data) self.assertEqual(attr_value, user['arbitrary_attr']) def test_arbitrary_attributes_are_returned_from_get_user(self): attr_value = uuid.uuid4().hex user_data = unit.new_user_ref( domain_id=CONF.identity.default_domain_id, arbitrary_attr=attr_value) user_data = self.identity_api.create_user(user_data) user = self.identity_api.get_user(user_data['id']) self.assertEqual(attr_value, user['arbitrary_attr']) def test_new_arbitrary_attributes_are_returned_from_update_user(self): user_data = unit.new_user_ref( domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user_data) attr_value = uuid.uuid4().hex user['arbitrary_attr'] = attr_value updated_user = self.identity_api.update_user(user['id'], user) self.assertEqual(attr_value, updated_user['arbitrary_attr']) def test_updated_arbitrary_attributes_are_returned_from_update_user(self): attr_value = uuid.uuid4().hex user_data = unit.new_user_ref( domain_id=CONF.identity.default_domain_id, arbitrary_attr=attr_value) new_attr_value = uuid.uuid4().hex user = self.identity_api.create_user(user_data) user['arbitrary_attr'] = new_attr_value updated_user = self.identity_api.update_user(user['id'], user) self.assertEqual(new_attr_value, updated_user['arbitrary_attr']) def test_user_update_and_user_get_return_same_response(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) updated_user = {'enabled': False} updated_user_ref = self.identity_api.update_user( user['id'], updated_user) # SQL backend adds 'extra' field updated_user_ref.pop('extra', None) self.assertIs(False, updated_user_ref['enabled']) user_ref = self.identity_api.get_user(user['id']) self.assertDictEqual(updated_user_ref, user_ref) class FilterTests(filtering.FilterTests): def test_list_entities_filtered(self): for entity in ['user', 'group', 'project']: # Create 20 entities entity_list = 
self._create_test_data(entity, 20) # Try filtering to get one an exact item out of the list hints = driver_hints.Hints() hints.add_filter('name', entity_list[10]['name']) entities = self._list_entities(entity)(hints=hints) self.assertEqual(1, len(entities)) self.assertEqual(entity_list[10]['id'], entities[0]['id']) # Check the driver has removed the filter from the list hints self.assertFalse(hints.get_exact_filter_by_name('name')) self._delete_test_data(entity, entity_list) def test_list_users_inexact_filtered(self): # Create 20 users, some with specific names. We set the names at create # time (rather than updating them), since the LDAP driver does not # support name updates. user_name_data = { # user index: name for user 5: 'The', 6: 'The Ministry', 7: 'The Ministry of', 8: 'The Ministry of Silly', 9: 'The Ministry of Silly Walks', # ...and one for useful case insensitivity testing 10: 'The ministry of silly walks OF' } user_list = self._create_test_data( 'user', 20, domain_id=CONF.identity.default_domain_id, name_dict=user_name_data) hints = driver_hints.Hints() hints.add_filter('name', 'ministry', comparator='contains') users = self.identity_api.list_users(hints=hints) self.assertEqual(5, len(users)) self._match_with_list(users, user_list, list_start=6, list_end=11) # TODO(henry-nash) Check inexact filter has been removed. hints = driver_hints.Hints() hints.add_filter('name', 'The', comparator='startswith') users = self.identity_api.list_users(hints=hints) self.assertEqual(6, len(users)) self._match_with_list(users, user_list, list_start=5, list_end=11) # TODO(henry-nash) Check inexact filter has been removed. 
hints = driver_hints.Hints() hints.add_filter('name', 'of', comparator='endswith') users = self.identity_api.list_users(hints=hints) self.assertEqual(2, len(users)) # We can't assume we will get back the users in any particular order self.assertIn(user_list[7]['id'], [users[0]['id'], users[1]['id']]) self.assertIn(user_list[10]['id'], [users[0]['id'], users[1]['id']]) # TODO(henry-nash) Check inexact filter has been removed. # TODO(henry-nash): Add some case sensitive tests. However, # these would be hard to validate currently, since: # # For SQL, the issue is that MySQL 0.7, by default, is installed in # case insensitive mode (which is what is run by default for our # SQL backend tests). For production deployments. OpenStack # assumes a case sensitive database. For these tests, therefore, we # need to be able to check the sensitivity of the database so as to # know whether to run case sensitive tests here. # # For LDAP/AD, although dependent on the schema being used, attributes # are typically configured to be case aware, but not case sensitive. self._delete_test_data('user', user_list) def _groups_for_user_data(self): number_of_groups = 10 group_name_data = { # entity index: name for entity 5: 'The', 6: 'The Ministry', 9: 'The Ministry of Silly Walks', } group_list = self._create_test_data( 'group', number_of_groups, domain_id=CONF.identity.default_domain_id, name_dict=group_name_data) user_list = self._create_test_data('user', 2) for group in range(7): # Create membership, including with two out of the three groups # with well know names self.identity_api.add_user_to_group(user_list[0]['id'], group_list[group]['id']) # ...and some spoiler memberships for group in range(7, number_of_groups): self.identity_api.add_user_to_group(user_list[1]['id'], group_list[group]['id']) return group_list, user_list def test_groups_for_user_inexact_filtered(self): """Test use of filtering doesn't break groups_for_user listing. 
Some backends may use filtering to achieve the list of groups for a user, so test that it can combine a second filter. Test Plan: - Create 10 groups, some with names we can filter on - Create 2 users - Assign 1 of those users to most of the groups, including some of the well known named ones - Assign the other user to other groups as spoilers - Ensure that when we list groups for users with a filter on the group name, both restrictions have been enforced on what is returned. """ group_list, user_list = self._groups_for_user_data() hints = driver_hints.Hints() hints.add_filter('name', 'Ministry', comparator='contains') groups = self.identity_api.list_groups_for_user( user_list[0]['id'], hints=hints) # We should only get back one group, since of the two that contain # 'Ministry' the user only belongs to one. self.assertThat(len(groups), matchers.Equals(1)) self.assertEqual(group_list[6]['id'], groups[0]['id']) hints = driver_hints.Hints() hints.add_filter('name', 'The', comparator='startswith') groups = self.identity_api.list_groups_for_user( user_list[0]['id'], hints=hints) # We should only get back 2 out of the 3 groups that start with 'The' # hence showing that both "filters" have been applied self.assertThat(len(groups), matchers.Equals(2)) self.assertIn(group_list[5]['id'], [groups[0]['id'], groups[1]['id']]) self.assertIn(group_list[6]['id'], [groups[0]['id'], groups[1]['id']]) hints.add_filter('name', 'The', comparator='endswith') groups = self.identity_api.list_groups_for_user( user_list[0]['id'], hints=hints) # We should only get back one group since it is the only one that # ends with 'The' self.assertThat(len(groups), matchers.Equals(1)) self.assertEqual(group_list[5]['id'], groups[0]['id']) self._delete_test_data('user', user_list) self._delete_test_data('group', group_list) def test_groups_for_user_exact_filtered(self): """Test exact filters doesn't break groups_for_user listing.""" group_list, user_list = self._groups_for_user_data() hints = 
driver_hints.Hints() hints.add_filter('name', 'The Ministry', comparator='equals') groups = self.identity_api.list_groups_for_user( user_list[0]['id'], hints=hints) # We should only get back 1 out of the 3 groups with name 'The # Ministry' hence showing that both "filters" have been applied. self.assertEqual(1, len(groups)) self.assertEqual(group_list[6]['id'], groups[0]['id']) self._delete_test_data('user', user_list) self._delete_test_data('group', group_list) def _get_user_name_field_size(self): """Return the size of the user name field for the backend. Subclasses can override this method to indicate that the user name field is limited in length. The user name is the field used in the test that validates that a filter value works even if it's longer than a field. If the backend doesn't limit the value length then return None. """ return None def test_filter_value_wider_than_field(self): # If a filter value is given that's larger than the field in the # backend then no values are returned. user_name_field_size = self._get_user_name_field_size() if user_name_field_size is None: # The backend doesn't limit the size of the user name, so pass this # test. return # Create some users just to make sure would return something if the # filter was ignored. 
self._create_test_data('user', 2) hints = driver_hints.Hints() value = 'A' * (user_name_field_size + 1) hints.add_filter('name', value) users = self.identity_api.list_users(hints=hints) self.assertEqual([], users) def _list_users_in_group_data(self): number_of_users = 10 user_name_data = { 1: 'Arthur Conan Doyle', 3: 'Arthur Rimbaud', 9: 'Arthur Schopenhauer', } user_list = self._create_test_data( 'user', number_of_users, domain_id=CONF.identity.default_domain_id, name_dict=user_name_data) group = self._create_one_entity( 'group', CONF.identity.default_domain_id, 'Great Writers') for i in range(7): self.identity_api.add_user_to_group(user_list[i]['id'], group['id']) return user_list, group def test_list_users_in_group_inexact_filtered(self): user_list, group = self._list_users_in_group_data() hints = driver_hints.Hints() hints.add_filter('name', 'Arthur', comparator='contains') users = self.identity_api.list_users_in_group(group['id'], hints=hints) self.assertThat(len(users), matchers.Equals(2)) self.assertIn(user_list[1]['id'], [users[0]['id'], users[1]['id']]) self.assertIn(user_list[3]['id'], [users[0]['id'], users[1]['id']]) hints = driver_hints.Hints() hints.add_filter('name', 'Arthur', comparator='startswith') users = self.identity_api.list_users_in_group(group['id'], hints=hints) self.assertThat(len(users), matchers.Equals(2)) self.assertIn(user_list[1]['id'], [users[0]['id'], users[1]['id']]) self.assertIn(user_list[3]['id'], [users[0]['id'], users[1]['id']]) hints = driver_hints.Hints() hints.add_filter('name', 'Doyle', comparator='endswith') users = self.identity_api.list_users_in_group(group['id'], hints=hints) self.assertThat(len(users), matchers.Equals(1)) self.assertEqual(user_list[1]['id'], users[0]['id']) self._delete_test_data('user', user_list) self._delete_entity('group')(group['id']) def test_list_users_in_group_exact_filtered(self): hints = driver_hints.Hints() user_list, group = self._list_users_in_group_data() hints.add_filter('name', 'Arthur 
Rimbaud', comparator='equals') users = self.identity_api.list_users_in_group(group['id'], hints=hints) self.assertEqual(1, len(users)) self.assertEqual(user_list[3]['id'], users[0]['id']) self._delete_test_data('user', user_list) self._delete_entity('group')(group['id']) class LimitTests(filtering.FilterTests): ENTITIES = ['user', 'group', 'project'] def setUp(self): """Setup for Limit Test Cases.""" self.entity_lists = {} for entity in self.ENTITIES: # Create 20 entities self.entity_lists[entity] = self._create_test_data(entity, 20) self.addCleanup(self.clean_up_entities) def clean_up_entities(self): """Clean up entity test data from Limit Test Cases.""" for entity in self.ENTITIES: self._delete_test_data(entity, self.entity_lists[entity]) del self.entity_lists def _test_list_entity_filtered_and_limited(self, entity): self.config_fixture.config(list_limit=10) # Should get back just 10 entities hints = driver_hints.Hints() entities = self._list_entities(entity)(hints=hints) self.assertEqual(hints.limit['limit'], len(entities)) self.assertTrue(hints.limit['truncated']) # Override with driver specific limit if entity == 'project': self.config_fixture.config(group='resource', list_limit=5) else: self.config_fixture.config(group='identity', list_limit=5) # Should get back just 5 users hints = driver_hints.Hints() entities = self._list_entities(entity)(hints=hints) self.assertEqual(hints.limit['limit'], len(entities)) # Finally, let's pretend we want to get the full list of entities, # even with the limits set, as part of some internal calculation. # Calling the API without a hints list should achieve this, and # return at least the 20 entries we created (there may be other # entities lying around created by other tests/setup). 
entities = self._list_entities(entity)() self.assertTrue(len(entities) >= 20) self._match_with_list(self.entity_lists[entity], entities) def test_list_users_filtered_and_limited(self): self._test_list_entity_filtered_and_limited('user') def test_list_groups_filtered_and_limited(self): self._test_list_entity_filtered_and_limited('group') def test_list_projects_filtered_and_limited(self): self._test_list_entity_filtered_and_limited('project') keystone-9.0.0/keystone/tests/unit/tests/0000775000567000056710000000000012701407246021651 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/tests/__init__.py0000664000567000056710000000000012701407102023737 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/tests/test_core.py0000664000567000056710000000331712701407102024205 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import warnings from oslo_log import log from sqlalchemy import exc from testtools import matchers from keystone.tests import unit LOG = log.getLogger(__name__) class BaseTestTestCase(unit.BaseTestCase): def test_unexpected_exit(self): # if a test calls sys.exit it raises rather than exiting. self.assertThat(lambda: sys.exit(), matchers.raises(unit.UnexpectedExit)) class TestTestCase(unit.TestCase): def test_bad_log(self): # If the arguments are invalid for the string in a log it raises an # exception during testing. 
self.assertThat( lambda: LOG.warning('String %(p1)s %(p2)s', {'p1': 'something'}), matchers.raises(KeyError)) def test_sa_warning(self): self.assertThat( lambda: warnings.warn('test sa warning error', exc.SAWarning), matchers.raises(exc.SAWarning)) def test_deprecation_warnings_are_raised_as_exceptions_in_tests(self): self.assertThat( lambda: warnings.warn('this is deprecated', DeprecationWarning), matchers.raises(DeprecationWarning)) keystone-9.0.0/keystone/tests/unit/tests/test_utils.py0000664000567000056710000000237712701407102024422 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from testtools import matchers from testtools import testcase from keystone.tests.unit import utils class TestWipDecorator(testcase.TestCase): def test_raises_SkipError_when_broken_test_fails(self): @utils.wip('waiting on bug #000000') def test(): raise Exception('i expected a failure - this is a WIP') e = self.assertRaises(testcase.TestSkipped, test) self.assertThat(str(e), matchers.Contains('#000000')) def test_raises_AssertionError_when_test_passes(self): @utils.wip('waiting on bug #000000') def test(): pass # literally e = self.assertRaises(AssertionError, test) self.assertThat(str(e), matchers.Contains('#000000')) keystone-9.0.0/keystone/tests/unit/federation_fixtures.py0000664000567000056710000000203212701407102025116 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. IDP_ENTITY_ID = 'https://localhost/v3/OS-FEDERATION/saml2/idp' IDP_SSO_ENDPOINT = 'https://localhost/v3/OS-FEDERATION/saml2/SSO' # Organization info IDP_ORGANIZATION_NAME = 'ACME INC' IDP_ORGANIZATION_DISPLAY_NAME = 'ACME' IDP_ORGANIZATION_URL = 'https://acme.example.com' # Contact info IDP_CONTACT_COMPANY = 'ACME Sub' IDP_CONTACT_GIVEN_NAME = 'Joe' IDP_CONTACT_SURNAME = 'Hacker' IDP_CONTACT_EMAIL = 'joe@acme.example.com' IDP_CONTACT_TELEPHONE_NUMBER = '1234567890' IDP_CONTACT_TYPE = 'technical' keystone-9.0.0/keystone/tests/unit/test_policy.py0000664000567000056710000002247212701407102023415 0ustar jenkinsjenkins00000000000000# Copyright 2011 Piston Cloud Computing, Inc. 
# All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os from oslo_policy import policy as common_policy import six from testtools import matchers from keystone import exception from keystone.policy.backends import rules from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile class PolicyFileTestCase(unit.TestCase): def setUp(self): # self.tmpfilename should exist before setUp super is called # this is to ensure it is available for the config_fixture in # the config_overrides call. 
self.tempfile = self.useFixture(temporaryfile.SecureTempFile()) self.tmpfilename = self.tempfile.file_name super(PolicyFileTestCase, self).setUp() self.target = {} def _policy_fixture(self): return ksfixtures.Policy(self.tmpfilename, self.config_fixture) def test_modified_policy_reloads(self): action = "example:test" empty_credentials = {} with open(self.tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": []}""") rules.enforce(empty_credentials, action, self.target) with open(self.tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": ["false:false"]}""") rules._ENFORCER.clear() self.assertRaises(exception.ForbiddenAction, rules.enforce, empty_credentials, action, self.target) def test_invalid_policy_raises_error(self): action = "example:test" empty_credentials = {} invalid_json = '{"example:test": [],}' with open(self.tmpfilename, "w") as policyfile: policyfile.write(invalid_json) self.assertRaises(ValueError, rules.enforce, empty_credentials, action, self.target) class PolicyTestCase(unit.TestCase): def setUp(self): super(PolicyTestCase, self).setUp() self.rules = { "true": [], "example:allowed": [], "example:denied": [["false:false"]], "example:get_http": [["http:http://www.example.com"]], "example:my_file": [["role:compute_admin"], ["project_id:%(project_id)s"]], "example:early_and_fail": [["false:false", "rule:true"]], "example:early_or_success": [["rule:true"], ["false:false"]], "example:lowercase_admin": [["role:admin"], ["role:sysadmin"]], "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]], } # NOTE(vish): then overload underlying policy engine self._set_rules() self.credentials = {} self.target = {} def _set_rules(self): these_rules = common_policy.Rules.from_dict(self.rules) rules._ENFORCER.set_rules(these_rules) def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises(exception.ForbiddenAction, rules.enforce, self.credentials, action, self.target) def 
test_enforce_bad_action_throws(self): action = "example:denied" self.assertRaises(exception.ForbiddenAction, rules.enforce, self.credentials, action, self.target) def test_enforce_good_action(self): action = "example:allowed" rules.enforce(self.credentials, action, self.target) def test_templatized_enforcement(self): target_mine = {'project_id': 'fake'} target_not_mine = {'project_id': 'another'} credentials = {'project_id': 'fake', 'roles': []} action = "example:my_file" rules.enforce(credentials, action, target_mine) self.assertRaises(exception.ForbiddenAction, rules.enforce, credentials, action, target_not_mine) def test_early_AND_enforcement(self): action = "example:early_and_fail" self.assertRaises(exception.ForbiddenAction, rules.enforce, self.credentials, action, self.target) def test_early_OR_enforcement(self): action = "example:early_or_success" rules.enforce(self.credentials, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "example:lowercase_admin" uppercase_action = "example:uppercase_admin" # NOTE(dprince): We mix case in the Admin role here to ensure # case is ignored admin_credentials = {'roles': ['AdMiN']} rules.enforce(admin_credentials, lowercase_action, self.target) rules.enforce(admin_credentials, uppercase_action, self.target) class DefaultPolicyTestCase(unit.TestCase): def setUp(self): super(DefaultPolicyTestCase, self).setUp() self.rules = { "default": [], "example:exist": [["false:false"]] } self._set_rules('default') self.credentials = {} # FIXME(gyee): latest Oslo policy Enforcer class reloads the rules in # its enforce() method even though rules has been initialized via # set_rules(). To make it easier to do our tests, we're going to # monkeypatch load_roles() so it does nothing. This seem like a bug in # Oslo policy as we shouldn't have to reload the rules if they have # already been set using set_rules(). 
self._old_load_rules = rules._ENFORCER.load_rules self.addCleanup(setattr, rules._ENFORCER, 'load_rules', self._old_load_rules) rules._ENFORCER.load_rules = lambda *args, **kwargs: None def _set_rules(self, default_rule): these_rules = common_policy.Rules.from_dict(self.rules, default_rule) rules._ENFORCER.set_rules(these_rules) def test_policy_called(self): self.assertRaises(exception.ForbiddenAction, rules.enforce, self.credentials, "example:exist", {}) def test_not_found_policy_calls_default(self): rules.enforce(self.credentials, "example:noexist", {}) def test_default_not_found(self): new_default_rule = "default_noexist" # FIXME(gyee): need to overwrite the Enforcer's default_rule first # as it is recreating the rules with its own default_rule instead # of the default_rule passed in from set_rules(). I think this is a # bug in Oslo policy. rules._ENFORCER.default_rule = new_default_rule self._set_rules(new_default_rule) self.assertRaises(exception.ForbiddenAction, rules.enforce, self.credentials, "example:noexist", {}) class PolicyJsonTestCase(unit.TestCase): def _load_entries(self, filename): return set(json.load(open(filename))) def test_json_examples_have_matching_entries(self): policy_keys = self._load_entries(unit.dirs.etc('policy.json')) cloud_policy_keys = self._load_entries( unit.dirs.etc('policy.v3cloudsample.json')) policy_extra_keys = ['admin_or_token_subject', 'service_admin_or_token_subject', 'token_subject', ] expected_policy_keys = list(cloud_policy_keys) + policy_extra_keys diffs = set(policy_keys).difference(set(expected_policy_keys)) self.assertThat(diffs, matchers.Equals(set())) def test_all_targets_documented(self): # All the targets in the sample policy file must be documented in # doc/source/policy_mapping.rst. policy_keys = self._load_entries(unit.dirs.etc('policy.json')) # These keys are in the policy.json but aren't targets. 
policy_rule_keys = [ 'admin_or_owner', 'admin_or_token_subject', 'admin_required', 'default', 'owner', 'service_admin_or_token_subject', 'service_or_admin', 'service_role', 'token_subject', ] def read_doc_targets(): # Parse the doc/source/policy_mapping.rst file and return the # targets. doc_path = os.path.join( unit.ROOTDIR, 'doc', 'source', 'policy_mapping.rst') with open(doc_path) as doc_file: for line in doc_file: if line.startswith('Target'): break for line in doc_file: # Skip === line if line.startswith('==='): break for line in doc_file: line = line.rstrip() if not line or line.startswith(' '): continue if line.startswith('=='): break target, dummy, dummy = line.partition(' ') yield six.text_type(target) doc_targets = list(read_doc_targets()) self.assertItemsEqual(policy_keys, doc_targets + policy_rule_keys) keystone-9.0.0/keystone/tests/unit/test_token_provider.py0000664000567000056710000007233112701407102025147 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from oslo_config import cfg from oslo_utils import timeutils from six.moves import reload_module from keystone.common import dependency from keystone.common import utils from keystone import exception from keystone.tests import unit from keystone.tests.unit.ksfixtures import database from keystone import token from keystone.token.providers import fernet from keystone.token.providers import pki from keystone.token.providers import pkiz from keystone.token.providers import uuid CONF = cfg.CONF FUTURE_DELTA = datetime.timedelta(seconds=CONF.token.expiration) CURRENT_DATE = timeutils.utcnow() SAMPLE_V2_TOKEN = { "access": { "trust": { "id": "abc123", "trustee_user_id": "123456", "trustor_user_id": "333333", "impersonation": False }, "serviceCatalog": [ { "endpoints": [ { "adminURL": "http://localhost:8774/v1.1/01257", "id": "51934fe63a5b4ac0a32664f64eb462c3", "internalURL": "http://localhost:8774/v1.1/01257", "publicURL": "http://localhost:8774/v1.1/01257", "region": "RegionOne" } ], "endpoints_links": [], "name": "nova", "type": "compute" }, { "endpoints": [ { "adminURL": "http://localhost:9292", "id": "aaa17a539e364297a7845d67c7c7cc4b", "internalURL": "http://localhost:9292", "publicURL": "http://localhost:9292", "region": "RegionOne" } ], "endpoints_links": [], "name": "glance", "type": "image" }, { "endpoints": [ { "adminURL": "http://localhost:8776/v1/01257", "id": "077d82df25304abeac2294004441db5a", "internalURL": "http://localhost:8776/v1/01257", "publicURL": "http://localhost:8776/v1/01257", "region": "RegionOne" } ], "endpoints_links": [], "name": "volume", "type": "volume" }, { "endpoints": [ { "adminURL": "http://localhost:8773/services/Admin", "id": "b06997fd08414903ad458836efaa9067", "internalURL": "http://localhost:8773/services/Cloud", "publicURL": "http://localhost:8773/services/Cloud", "region": "RegionOne" } ], "endpoints_links": [], "name": "ec2", "type": "ec2" }, { "endpoints": [ { "adminURL": "http://localhost:8080/v1", "id": 
"7bd0c643e05a4a2ab40902b2fa0dd4e6", "internalURL": "http://localhost:8080/v1/AUTH_01257", "publicURL": "http://localhost:8080/v1/AUTH_01257", "region": "RegionOne" } ], "endpoints_links": [], "name": "swift", "type": "object-store" }, { "endpoints": [ { "adminURL": "http://localhost:35357/v2.0", "id": "02850c5d1d094887bdc46e81e1e15dc7", "internalURL": "http://localhost:5000/v2.0", "publicURL": "http://localhost:5000/v2.0", "region": "RegionOne" } ], "endpoints_links": [], "name": "keystone", "type": "identity" } ], "token": { "expires": "2013-05-22T00:02:43.941430Z", "id": "ce4fc2d36eea4cc9a36e666ac2f1029a", "issued_at": "2013-05-21T00:02:43.941473Z", "tenant": { "enabled": True, "id": "01257", "name": "service" } }, "user": { "id": "f19ddbe2c53c46f189fe66d0a7a9c9ce", "name": "nova", "roles": [ { "name": "_member_" }, { "name": "admin" } ], "roles_links": [], "username": "nova" } } } SAMPLE_V3_TOKEN = { "token": { "catalog": [ { "endpoints": [ { "id": "02850c5d1d094887bdc46e81e1e15dc7", "interface": "admin", "region": "RegionOne", "url": "http://localhost:35357/v2.0" }, { "id": "446e244b75034a9ab4b0811e82d0b7c8", "interface": "internal", "region": "RegionOne", "url": "http://localhost:5000/v2.0" }, { "id": "47fa3d9f499240abb5dfcf2668f168cd", "interface": "public", "region": "RegionOne", "url": "http://localhost:5000/v2.0" } ], "id": "26d7541715a44a4d9adad96f9872b633", "type": "identity", }, { "endpoints": [ { "id": "aaa17a539e364297a7845d67c7c7cc4b", "interface": "admin", "region": "RegionOne", "url": "http://localhost:9292" }, { "id": "4fa9620e42394cb1974736dce0856c71", "interface": "internal", "region": "RegionOne", "url": "http://localhost:9292" }, { "id": "9673687f9bc441d88dec37942bfd603b", "interface": "public", "region": "RegionOne", "url": "http://localhost:9292" } ], "id": "d27a41843f4e4b0e8cf6dac4082deb0d", "type": "image", }, { "endpoints": [ { "id": "7bd0c643e05a4a2ab40902b2fa0dd4e6", "interface": "admin", "region": "RegionOne", "url": 
"http://localhost:8080/v1" }, { "id": "43bef154594d4ccb8e49014d20624e1d", "interface": "internal", "region": "RegionOne", "url": "http://localhost:8080/v1/AUTH_01257" }, { "id": "e63b5f5d7aa3493690189d0ff843b9b3", "interface": "public", "region": "RegionOne", "url": "http://localhost:8080/v1/AUTH_01257" } ], "id": "a669e152f1104810a4b6701aade721bb", "type": "object-store", }, { "endpoints": [ { "id": "51934fe63a5b4ac0a32664f64eb462c3", "interface": "admin", "region": "RegionOne", "url": "http://localhost:8774/v1.1/01257" }, { "id": "869b535eea0d42e483ae9da0d868ebad", "interface": "internal", "region": "RegionOne", "url": "http://localhost:8774/v1.1/01257" }, { "id": "93583824c18f4263a2245ca432b132a6", "interface": "public", "region": "RegionOne", "url": "http://localhost:8774/v1.1/01257" } ], "id": "7f32cc2af6c9476e82d75f80e8b3bbb8", "type": "compute", }, { "endpoints": [ { "id": "b06997fd08414903ad458836efaa9067", "interface": "admin", "region": "RegionOne", "url": "http://localhost:8773/services/Admin" }, { "id": "411f7de7c9a8484c9b46c254fb2676e2", "interface": "internal", "region": "RegionOne", "url": "http://localhost:8773/services/Cloud" }, { "id": "f21c93f3da014785854b4126d0109c49", "interface": "public", "region": "RegionOne", "url": "http://localhost:8773/services/Cloud" } ], "id": "b08c9c7d4ef543eba5eeb766f72e5aa1", "type": "ec2", }, { "endpoints": [ { "id": "077d82df25304abeac2294004441db5a", "interface": "admin", "region": "RegionOne", "url": "http://localhost:8776/v1/01257" }, { "id": "875bf282362c40219665278b4fd11467", "interface": "internal", "region": "RegionOne", "url": "http://localhost:8776/v1/01257" }, { "id": "cd229aa6df0640dc858a8026eb7e640c", "interface": "public", "region": "RegionOne", "url": "http://localhost:8776/v1/01257" } ], "id": "5db21b82617f4a95816064736a7bec22", "type": "volume", } ], "expires_at": "2013-05-22T00:02:43.941430Z", "issued_at": "2013-05-21T00:02:43.941473Z", "methods": [ "password" ], "project": { "domain": { "id": 
"default", "name": "Default" }, "id": "01257", "name": "service" }, "roles": [ { "id": "9fe2ff9ee4384b1894a90878d3e92bab", "name": "_member_" }, { "id": "53bff13443bd4450b97f978881d47b18", "name": "admin" } ], "user": { "domain": { "id": "default", "name": "Default" }, "id": "f19ddbe2c53c46f189fe66d0a7a9c9ce", "name": "nova" }, "OS-TRUST:trust": { "id": "abc123", "trustee_user_id": "123456", "trustor_user_id": "333333", "impersonation": False } } } SAMPLE_V2_TOKEN_WITH_EMBEDED_VERSION = { "access": { "trust": { "id": "abc123", "trustee_user_id": "123456", "trustor_user_id": "333333", "impersonation": False }, "serviceCatalog": [ { "endpoints": [ { "adminURL": "http://localhost:8774/v1.1/01257", "id": "51934fe63a5b4ac0a32664f64eb462c3", "internalURL": "http://localhost:8774/v1.1/01257", "publicURL": "http://localhost:8774/v1.1/01257", "region": "RegionOne" } ], "endpoints_links": [], "name": "nova", "type": "compute" }, { "endpoints": [ { "adminURL": "http://localhost:9292", "id": "aaa17a539e364297a7845d67c7c7cc4b", "internalURL": "http://localhost:9292", "publicURL": "http://localhost:9292", "region": "RegionOne" } ], "endpoints_links": [], "name": "glance", "type": "image" }, { "endpoints": [ { "adminURL": "http://localhost:8776/v1/01257", "id": "077d82df25304abeac2294004441db5a", "internalURL": "http://localhost:8776/v1/01257", "publicURL": "http://localhost:8776/v1/01257", "region": "RegionOne" } ], "endpoints_links": [], "name": "volume", "type": "volume" }, { "endpoints": [ { "adminURL": "http://localhost:8773/services/Admin", "id": "b06997fd08414903ad458836efaa9067", "internalURL": "http://localhost:8773/services/Cloud", "publicURL": "http://localhost:8773/services/Cloud", "region": "RegionOne" } ], "endpoints_links": [], "name": "ec2", "type": "ec2" }, { "endpoints": [ { "adminURL": "http://localhost:8080/v1", "id": "7bd0c643e05a4a2ab40902b2fa0dd4e6", "internalURL": "http://localhost:8080/v1/AUTH_01257", "publicURL": "http://localhost:8080/v1/AUTH_01257", 
"region": "RegionOne" } ], "endpoints_links": [], "name": "swift", "type": "object-store" }, { "endpoints": [ { "adminURL": "http://localhost:35357/v2.0", "id": "02850c5d1d094887bdc46e81e1e15dc7", "internalURL": "http://localhost:5000/v2.0", "publicURL": "http://localhost:5000/v2.0", "region": "RegionOne" } ], "endpoints_links": [], "name": "keystone", "type": "identity" } ], "token": { "expires": "2013-05-22T00:02:43.941430Z", "id": "ce4fc2d36eea4cc9a36e666ac2f1029a", "issued_at": "2013-05-21T00:02:43.941473Z", "tenant": { "enabled": True, "id": "01257", "name": "service" } }, "user": { "id": "f19ddbe2c53c46f189fe66d0a7a9c9ce", "name": "nova", "roles": [ { "name": "_member_" }, { "name": "admin" } ], "roles_links": [], "username": "nova" } }, 'token_version': 'v2.0' } SAMPLE_V3_TOKEN_WITH_EMBEDED_VERSION = { "token": { "catalog": [ { "endpoints": [ { "id": "02850c5d1d094887bdc46e81e1e15dc7", "interface": "admin", "region": "RegionOne", "url": "http://localhost:35357/v2.0" }, { "id": "446e244b75034a9ab4b0811e82d0b7c8", "interface": "internal", "region": "RegionOne", "url": "http://localhost:5000/v2.0" }, { "id": "47fa3d9f499240abb5dfcf2668f168cd", "interface": "public", "region": "RegionOne", "url": "http://localhost:5000/v2.0" } ], "id": "26d7541715a44a4d9adad96f9872b633", "type": "identity", }, { "endpoints": [ { "id": "aaa17a539e364297a7845d67c7c7cc4b", "interface": "admin", "region": "RegionOne", "url": "http://localhost:9292" }, { "id": "4fa9620e42394cb1974736dce0856c71", "interface": "internal", "region": "RegionOne", "url": "http://localhost:9292" }, { "id": "9673687f9bc441d88dec37942bfd603b", "interface": "public", "region": "RegionOne", "url": "http://localhost:9292" } ], "id": "d27a41843f4e4b0e8cf6dac4082deb0d", "type": "image", }, { "endpoints": [ { "id": "7bd0c643e05a4a2ab40902b2fa0dd4e6", "interface": "admin", "region": "RegionOne", "url": "http://localhost:8080/v1" }, { "id": "43bef154594d4ccb8e49014d20624e1d", "interface": "internal", "region": 
"RegionOne", "url": "http://localhost:8080/v1/AUTH_01257" }, { "id": "e63b5f5d7aa3493690189d0ff843b9b3", "interface": "public", "region": "RegionOne", "url": "http://localhost:8080/v1/AUTH_01257" } ], "id": "a669e152f1104810a4b6701aade721bb", "type": "object-store", }, { "endpoints": [ { "id": "51934fe63a5b4ac0a32664f64eb462c3", "interface": "admin", "region": "RegionOne", "url": "http://localhost:8774/v1.1/01257" }, { "id": "869b535eea0d42e483ae9da0d868ebad", "interface": "internal", "region": "RegionOne", "url": "http://localhost:8774/v1.1/01257" }, { "id": "93583824c18f4263a2245ca432b132a6", "interface": "public", "region": "RegionOne", "url": "http://localhost:8774/v1.1/01257" } ], "id": "7f32cc2af6c9476e82d75f80e8b3bbb8", "type": "compute", }, { "endpoints": [ { "id": "b06997fd08414903ad458836efaa9067", "interface": "admin", "region": "RegionOne", "url": "http://localhost:8773/services/Admin" }, { "id": "411f7de7c9a8484c9b46c254fb2676e2", "interface": "internal", "region": "RegionOne", "url": "http://localhost:8773/services/Cloud" }, { "id": "f21c93f3da014785854b4126d0109c49", "interface": "public", "region": "RegionOne", "url": "http://localhost:8773/services/Cloud" } ], "id": "b08c9c7d4ef543eba5eeb766f72e5aa1", "type": "ec2", }, { "endpoints": [ { "id": "077d82df25304abeac2294004441db5a", "interface": "admin", "region": "RegionOne", "url": "http://localhost:8776/v1/01257" }, { "id": "875bf282362c40219665278b4fd11467", "interface": "internal", "region": "RegionOne", "url": "http://localhost:8776/v1/01257" }, { "id": "cd229aa6df0640dc858a8026eb7e640c", "interface": "public", "region": "RegionOne", "url": "http://localhost:8776/v1/01257" } ], "id": "5db21b82617f4a95816064736a7bec22", "type": "volume", } ], "expires_at": "2013-05-22T00:02:43.941430Z", "issued_at": "2013-05-21T00:02:43.941473Z", "methods": [ "password" ], "project": { "domain": { "id": "default", "name": "Default" }, "id": "01257", "name": "service" }, "roles": [ { "id": 
"9fe2ff9ee4384b1894a90878d3e92bab", "name": "_member_" }, { "id": "53bff13443bd4450b97f978881d47b18", "name": "admin" } ], "user": { "domain": { "id": "default", "name": "Default" }, "id": "f19ddbe2c53c46f189fe66d0a7a9c9ce", "name": "nova" }, "OS-TRUST:trust": { "id": "abc123", "trustee_user_id": "123456", "trustor_user_id": "333333", "impersonation": False } }, 'token_version': 'v3.0' } def create_v2_token(): return { "access": { "token": { "expires": utils.isotime(timeutils.utcnow() + FUTURE_DELTA), "issued_at": "2013-05-21T00:02:43.941473Z", "tenant": { "enabled": True, "id": "01257", "name": "service" } } } } SAMPLE_V2_TOKEN_EXPIRED = { "access": { "token": { "expires": utils.isotime(CURRENT_DATE), "issued_at": "2013-05-21T00:02:43.941473Z", "tenant": { "enabled": True, "id": "01257", "name": "service" } } } } def create_v3_token(): return { "token": { 'methods': [], "expires_at": utils.isotime(timeutils.utcnow() + FUTURE_DELTA), "issued_at": "2013-05-21T00:02:43.941473Z", } } SAMPLE_V3_TOKEN_EXPIRED = { "token": { "expires_at": utils.isotime(CURRENT_DATE), "issued_at": "2013-05-21T00:02:43.941473Z", } } SAMPLE_MALFORMED_TOKEN = { "token": { "bogus": { "no expiration data": None } } } class TestTokenProvider(unit.TestCase): def setUp(self): super(TestTokenProvider, self).setUp() self.useFixture(database.Database()) self.load_backends() def test_get_token_version(self): self.assertEqual( token.provider.V2, self.token_provider_api.get_token_version(SAMPLE_V2_TOKEN)) self.assertEqual( token.provider.V2, self.token_provider_api.get_token_version( SAMPLE_V2_TOKEN_WITH_EMBEDED_VERSION)) self.assertEqual( token.provider.V3, self.token_provider_api.get_token_version(SAMPLE_V3_TOKEN)) self.assertEqual( token.provider.V3, self.token_provider_api.get_token_version( SAMPLE_V3_TOKEN_WITH_EMBEDED_VERSION)) self.assertRaises(exception.UnsupportedTokenVersionException, self.token_provider_api.get_token_version, 'bogus') def test_supported_token_providers(self): # test default 
config dependency.reset() self.assertIsInstance(token.provider.Manager().driver, uuid.Provider) dependency.reset() self.config_fixture.config(group='token', provider='uuid') self.assertIsInstance(token.provider.Manager().driver, uuid.Provider) dependency.reset() self.config_fixture.config(group='token', provider='pki') self.assertIsInstance(token.provider.Manager().driver, pki.Provider) dependency.reset() self.config_fixture.config(group='token', provider='pkiz') self.assertIsInstance(token.provider.Manager().driver, pkiz.Provider) dependency.reset() self.config_fixture.config(group='token', provider='fernet') self.assertIsInstance(token.provider.Manager().driver, fernet.Provider) def test_unsupported_token_provider(self): self.config_fixture.config(group='token', provider='my.package.MyProvider') self.assertRaises(ImportError, token.provider.Manager) def test_provider_token_expiration_validation(self): self.assertRaises(exception.TokenNotFound, self.token_provider_api._is_valid_token, SAMPLE_V2_TOKEN_EXPIRED) self.assertRaises(exception.TokenNotFound, self.token_provider_api._is_valid_token, SAMPLE_V3_TOKEN_EXPIRED) self.assertRaises(exception.TokenNotFound, self.token_provider_api._is_valid_token, SAMPLE_MALFORMED_TOKEN) self.assertIsNone( self.token_provider_api._is_valid_token(create_v2_token())) self.assertIsNone( self.token_provider_api._is_valid_token(create_v3_token())) def test_no_token_raises_token_not_found(self): self.assertRaises( exception.TokenNotFound, self.token_provider_api.validate_token, None) # NOTE(ayoung): renamed to avoid automatic test detection class PKIProviderTests(object): def setUp(self): super(PKIProviderTests, self).setUp() from keystoneclient.common import cms self.cms = cms from keystone.common import environment self.environment = environment old_cms_subprocess = cms.subprocess self.addCleanup(setattr, cms, 'subprocess', old_cms_subprocess) old_env_subprocess = environment.subprocess self.addCleanup(setattr, environment, 
'subprocess', old_env_subprocess) self.cms.subprocess = self.target_subprocess self.environment.subprocess = self.target_subprocess # force module reload so the imports get re-evaluated reload_module(pki) def test_get_token_id_error_handling(self): # cause command-line failure self.config_fixture.config(group='signing', keyfile='--please-break-me') provider = pki.Provider() token_data = {} self.assertRaises(exception.UnexpectedError, provider._get_token_id, token_data) class TestPKIProviderWithEventlet(PKIProviderTests, unit.TestCase): def setUp(self): # force keystoneclient.common.cms to use eventlet's subprocess from eventlet.green import subprocess self.target_subprocess = subprocess super(TestPKIProviderWithEventlet, self).setUp() class TestPKIProviderWithStdlib(PKIProviderTests, unit.TestCase): def setUp(self): # force keystoneclient.common.cms to use the stdlib subprocess import subprocess self.target_subprocess = subprocess super(TestPKIProviderWithStdlib, self).setUp() keystone-9.0.0/keystone/tests/unit/test_v3_assignment.py0000664000567000056710000036564612701407105024716 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import random import uuid from oslo_config import cfg from six.moves import http_client from six.moves import range from testtools import matchers from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = cfg.CONF class AssignmentTestCase(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin): """Test roles and role assignments.""" def setUp(self): super(AssignmentTestCase, self).setUp() self.group = unit.new_group_ref(domain_id=self.domain_id) self.group = self.identity_api.create_group(self.group) self.group_id = self.group['id'] # Role CRUD tests def test_create_role(self): """Call ``POST /roles``.""" ref = unit.new_role_ref() r = self.post( '/roles', body={'role': ref}) return self.assertValidRoleResponse(r, ref) def test_create_role_bad_request(self): """Call ``POST /roles``.""" self.post('/roles', body={'role': {}}, expected_status=http_client.BAD_REQUEST) def test_list_roles(self): """Call ``GET /roles``.""" resource_url = '/roles' r = self.get(resource_url) self.assertValidRoleListResponse(r, ref=self.role, resource_url=resource_url) def test_get_role(self): """Call ``GET /roles/{role_id}``.""" r = self.get('/roles/%(role_id)s' % { 'role_id': self.role_id}) self.assertValidRoleResponse(r, self.role) def test_update_role(self): """Call ``PATCH /roles/{role_id}``.""" ref = unit.new_role_ref() del ref['id'] r = self.patch('/roles/%(role_id)s' % { 'role_id': self.role_id}, body={'role': ref}) self.assertValidRoleResponse(r, ref) def test_delete_role(self): """Call ``DELETE /roles/{role_id}``.""" self.delete('/roles/%(role_id)s' % { 'role_id': self.role_id}) def test_create_member_role(self): """Call ``POST /roles``.""" # specify only the name on creation ref = unit.new_role_ref(name=CONF.member_role_name) r = self.post( '/roles', body={'role': ref}) self.assertValidRoleResponse(r, ref) # but the ID should be set as defined in CONF self.assertEqual(CONF.member_role_id, r.json['role']['id']) # Role Grants tests def 
test_crud_user_project_role_grants(self): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) collection_url = ( '/projects/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': self.project['id'], 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': role['id']} # There is a role assignment for self.user on self.project r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, expected_length=1) self.put(member_url) self.head(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role, resource_url=collection_url, expected_length=2) self.delete(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, expected_length=1) self.assertIn(collection_url, r.result['links']['self']) def test_crud_user_project_role_grants_no_user(self): """Grant role on a project to a user that doesn't exist. When grant a role on a project to a user that doesn't exist, the server returns Not Found for the user. 
""" user_id = uuid.uuid4().hex collection_url = ( '/projects/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': self.project['id'], 'user_id': user_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url, expected_status=http_client.NOT_FOUND) def test_crud_user_domain_role_grants(self): collection_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': self.domain_id, 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url) self.head(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, resource_url=collection_url) self.delete(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, expected_length=0, resource_url=collection_url) def test_crud_user_domain_role_grants_no_user(self): """Grant role on a domain to a user that doesn't exist. When grant a role on a domain to a user that doesn't exist, the server returns 404 Not Found for the user. 
""" user_id = uuid.uuid4().hex collection_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': self.domain_id, 'user_id': user_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url, expected_status=http_client.NOT_FOUND) def test_crud_group_project_role_grants(self): collection_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles' % { 'project_id': self.project_id, 'group_id': self.group_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url) self.head(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, resource_url=collection_url) self.delete(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, expected_length=0, resource_url=collection_url) def test_crud_group_project_role_grants_no_group(self): """Grant role on a project to a group that doesn't exist. When grant a role on a project to a group that doesn't exist, the server returns 404 Not Found for the group. 
""" group_id = uuid.uuid4().hex collection_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles' % { 'project_id': self.project_id, 'group_id': group_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url, expected_status=http_client.NOT_FOUND) def test_crud_group_domain_role_grants(self): collection_url = ( '/domains/%(domain_id)s/groups/%(group_id)s/roles' % { 'domain_id': self.domain_id, 'group_id': self.group_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url) self.head(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, resource_url=collection_url) self.delete(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, expected_length=0, resource_url=collection_url) def test_crud_group_domain_role_grants_no_group(self): """Grant role on a domain to a group that doesn't exist. When grant a role on a domain to a group that doesn't exist, the server returns 404 Not Found for the group. 
""" group_id = uuid.uuid4().hex collection_url = ( '/domains/%(domain_id)s/groups/%(group_id)s/roles' % { 'domain_id': self.domain_id, 'group_id': group_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url, expected_status=http_client.NOT_FOUND) def _create_new_user_and_assign_role_on_project(self): """Create a new user and assign user a role on a project.""" # Create a new user new_user = unit.new_user_ref(domain_id=self.domain_id) user_ref = self.identity_api.create_user(new_user) # Assign the user a role on the project collection_url = ( '/projects/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': self.project_id, 'user_id': user_ref['id']}) member_url = ('%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id}) self.put(member_url) # Check the user has the role assigned self.head(member_url) return member_url, user_ref def test_delete_user_before_removing_role_assignment_succeeds(self): """Call ``DELETE`` on the user before the role assignment.""" member_url, user = self._create_new_user_and_assign_role_on_project() # Delete the user from identity backend self.identity_api.driver.delete_user(user['id']) # Clean up the role assignment self.delete(member_url) # Make sure the role is gone self.head(member_url, expected_status=http_client.NOT_FOUND) def test_delete_user_and_check_role_assignment_fails(self): """Call ``DELETE`` on the user and check the role assignment.""" member_url, user = self._create_new_user_and_assign_role_on_project() # Delete the user from identity backend self.identity_api.delete_user(user['id']) # We should get a 404 Not Found when looking for the user in the # identity backend because we're not performing a delete operation on # the role. 
self.head(member_url, expected_status=http_client.NOT_FOUND) def test_token_revoked_once_group_role_grant_revoked(self): """Test token is revoked when group role grant is revoked When a role granted to a group is revoked for a given scope, all tokens related to this scope and belonging to one of the members of this group should be revoked. The revocation should be independently to the presence of the revoke API. """ # creates grant from group on project. self.assignment_api.create_grant(role_id=self.role['id'], project_id=self.project['id'], group_id=self.group['id']) # adds user to the group. self.identity_api.add_user_to_group(user_id=self.user['id'], group_id=self.group['id']) # creates a token for the user auth_body = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) token_resp = self.post('/auth/tokens', body=auth_body) token = token_resp.headers.get('x-subject-token') # validates the returned token; it should be valid. self.head('/auth/tokens', headers={'x-subject-token': token}, expected_status=http_client.OK) # revokes the grant from group on project. self.assignment_api.delete_grant(role_id=self.role['id'], project_id=self.project['id'], group_id=self.group['id']) # validates the same token again; it should not longer be valid. 
self.head('/auth/tokens', headers={'x-subject-token': token}, expected_status=http_client.NOT_FOUND) @unit.skip_if_cache_disabled('assignment') def test_delete_grant_from_user_and_project_invalidate_cache(self): # create a new project new_project = unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(new_project['id'], new_project) collection_url = ( '/projects/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': new_project['id'], 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} # create the user a grant on the new project self.put(member_url) # check the grant that was just created self.head(member_url) resp = self.get(collection_url) self.assertValidRoleListResponse(resp, ref=self.role, resource_url=collection_url) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles on the project resp = self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) @unit.skip_if_cache_disabled('assignment') def test_delete_grant_from_user_and_domain_invalidates_cache(self): # create a new domain new_domain = unit.new_domain_ref() self.resource_api.create_domain(new_domain['id'], new_domain) collection_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': new_domain['id'], 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} # create the user a grant on the new domain self.put(member_url) # check the grant that was just created self.head(member_url) resp = self.get(collection_url) self.assertValidRoleListResponse(resp, ref=self.role, resource_url=collection_url) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles on the domain resp = self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) @unit.skip_if_cache_disabled('assignment') def 
test_delete_grant_from_group_and_project_invalidates_cache(self): # create a new project new_project = unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(new_project['id'], new_project) collection_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles' % { 'project_id': new_project['id'], 'group_id': self.group['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} # create the group a grant on the new project self.put(member_url) # check the grant that was just created self.head(member_url) resp = self.get(collection_url) self.assertValidRoleListResponse(resp, ref=self.role, resource_url=collection_url) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles on the project resp = self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) @unit.skip_if_cache_disabled('assignment') def test_delete_grant_from_group_and_domain_invalidates_cache(self): # create a new domain new_domain = unit.new_domain_ref() self.resource_api.create_domain(new_domain['id'], new_domain) collection_url = ( '/domains/%(domain_id)s/groups/%(group_id)s/roles' % { 'domain_id': new_domain['id'], 'group_id': self.group['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} # create the group a grant on the new domain self.put(member_url) # check the grant that was just created self.head(member_url) resp = self.get(collection_url) self.assertValidRoleListResponse(resp, ref=self.role, resource_url=collection_url) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles on the domain resp = self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) # Role Assignments tests def test_get_role_assignments(self): """Call ``GET /role_assignments``. The sample data set up already has a user, group and project that is part of self.domain. 
We use these plus a new user we create as our data set, making sure we ignore any role assignments that are already in existence. Since we don't yet support a first class entity for role assignments, we are only testing the LIST API. To create and delete the role assignments we use the old grant APIs. Test Plan: - Create extra user for tests - Get a list of all existing role assignments - Add a new assignment for each of the four combinations, i.e. group+domain, user+domain, group+project, user+project, using the same role each time - Get a new list of all role assignments, checking these four new ones have been added - Then delete the four we added - Get a new list of all role assignments, checking the four have been removed """ # Since the default fixtures already assign some roles to the # user it creates, we also need a new user that will not have any # existing assignments user1 = unit.new_user_ref(domain_id=self.domain['id']) user1 = self.identity_api.create_user(user1) collection_url = '/role_assignments' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, resource_url=collection_url) existing_assignments = len(r.result.get('role_assignments')) # Now add one of each of the four types of assignment, making sure # that we get them all back. 
gd_entity = self.build_role_assignment_entity(domain_id=self.domain_id, group_id=self.group_id, role_id=self.role_id) self.put(gd_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gd_entity) ud_entity = self.build_role_assignment_entity(domain_id=self.domain_id, user_id=user1['id'], role_id=self.role_id) self.put(ud_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, ud_entity) gp_entity = self.build_role_assignment_entity( project_id=self.project_id, group_id=self.group_id, role_id=self.role_id) self.put(gp_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 3, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gp_entity) up_entity = self.build_role_assignment_entity( project_id=self.project_id, user_id=user1['id'], role_id=self.role_id) self.put(up_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 4, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, up_entity) # Now delete the four we added and make sure they are removed # from the collection. 
self.delete(gd_entity['links']['assignment']) self.delete(ud_entity['links']['assignment']) self.delete(gp_entity['links']['assignment']) self.delete(up_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments, resource_url=collection_url) self.assertRoleAssignmentNotInListResponse(r, gd_entity) self.assertRoleAssignmentNotInListResponse(r, ud_entity) self.assertRoleAssignmentNotInListResponse(r, gp_entity) self.assertRoleAssignmentNotInListResponse(r, up_entity) def test_get_effective_role_assignments(self): """Call ``GET /role_assignments?effective``. Test Plan: - Create two extra user for tests - Add these users to a group - Add a role assignment for the group on a domain - Get a list of all role assignments, checking one has been added - Then get a list of all effective role assignments - the group assignment should have turned into assignments on the domain for each of the group members. """ user1 = unit.create_user(self.identity_api, domain_id=self.domain['id']) user2 = unit.create_user(self.identity_api, domain_id=self.domain['id']) self.identity_api.add_user_to_group(user1['id'], self.group['id']) self.identity_api.add_user_to_group(user2['id'], self.group['id']) collection_url = '/role_assignments' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, resource_url=collection_url) existing_assignments = len(r.result.get('role_assignments')) gd_entity = self.build_role_assignment_entity(domain_id=self.domain_id, group_id=self.group_id, role_id=self.role_id) self.put(gd_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gd_entity) # Now re-read the collection asking for effective roles - this # should mean the group assignment is translated into the two # member user assignments 
collection_url = '/role_assignments?effective' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url) ud_entity = self.build_role_assignment_entity( link=gd_entity['links']['assignment'], domain_id=self.domain_id, user_id=user1['id'], role_id=self.role_id) self.assertRoleAssignmentInListResponse(r, ud_entity) ud_entity = self.build_role_assignment_entity( link=gd_entity['links']['assignment'], domain_id=self.domain_id, user_id=user2['id'], role_id=self.role_id) self.assertRoleAssignmentInListResponse(r, ud_entity) def test_check_effective_values_for_role_assignments(self): """Call ``GET /role_assignments?effective=value``. Check the various ways of specifying the 'effective' query parameter. If the 'effective' query parameter is included then this should always be treated as meaning 'True' unless it is specified as: {url}?effective=0 This is by design to match the agreed way of handling policy checking on query/filter parameters. Test Plan: - Create two extra user for tests - Add these users to a group - Add a role assignment for the group on a domain - Get a list of all role assignments, checking one has been added - Then issue various request with different ways of defining the 'effective' query parameter. 
As we have tested the correctness of the data coming back when we get effective roles in other tests, here we just use the count of entities to know if we are getting effective roles or not """ user1 = unit.create_user(self.identity_api, domain_id=self.domain['id']) user2 = unit.create_user(self.identity_api, domain_id=self.domain['id']) self.identity_api.add_user_to_group(user1['id'], self.group['id']) self.identity_api.add_user_to_group(user2['id'], self.group['id']) collection_url = '/role_assignments' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, resource_url=collection_url) existing_assignments = len(r.result.get('role_assignments')) gd_entity = self.build_role_assignment_entity(domain_id=self.domain_id, group_id=self.group_id, role_id=self.role_id) self.put(gd_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gd_entity) # Now re-read the collection asking for effective roles, # using the most common way of defining "effective'. This # should mean the group assignment is translated into the two # member user assignments collection_url = '/role_assignments?effective' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url) # Now set 'effective' to false explicitly - should get # back the regular roles collection_url = '/role_assignments?effective=0' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url) # Now try setting 'effective' to 'False' explicitly- this is # NOT supported as a way of setting a query or filter # parameter to false by design. Hence we should get back # effective roles. 
collection_url = '/role_assignments?effective=False' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url) # Now set 'effective' to True explicitly collection_url = '/role_assignments?effective=True' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url) def test_filtered_role_assignments(self): """Call ``GET /role_assignments?filters``. Test Plan: - Create extra users, group, role and project for tests - Make the following assignments: Give group1, role1 on project1 and domain Give user1, role2 on project1 and domain Make User1 a member of Group1 - Test a series of single filter list calls, checking that the correct results are obtained - Test a multi-filtered list call - Test listing all effective roles for a given user - Test the equivalent of the list of roles in a project scoped token (all effective roles for a user on a project) """ # Since the default fixtures already assign some roles to the # user it creates, we also need a new user that will not have any # existing assignments user1 = unit.create_user(self.identity_api, domain_id=self.domain['id']) user2 = unit.create_user(self.identity_api, domain_id=self.domain['id']) group1 = unit.new_group_ref(domain_id=self.domain['id']) group1 = self.identity_api.create_group(group1) self.identity_api.add_user_to_group(user1['id'], group1['id']) self.identity_api.add_user_to_group(user2['id'], group1['id']) project1 = unit.new_project_ref(domain_id=self.domain['id']) self.resource_api.create_project(project1['id'], project1) self.role1 = unit.new_role_ref() self.role_api.create_role(self.role1['id'], self.role1) self.role2 = unit.new_role_ref() self.role_api.create_role(self.role2['id'], self.role2) # Now add one of each of the four types of assignment gd_entity = self.build_role_assignment_entity( domain_id=self.domain_id, 
group_id=group1['id'], role_id=self.role1['id']) self.put(gd_entity['links']['assignment']) ud_entity = self.build_role_assignment_entity(domain_id=self.domain_id, user_id=user1['id'], role_id=self.role2['id']) self.put(ud_entity['links']['assignment']) gp_entity = self.build_role_assignment_entity( project_id=project1['id'], group_id=group1['id'], role_id=self.role1['id']) self.put(gp_entity['links']['assignment']) up_entity = self.build_role_assignment_entity( project_id=project1['id'], user_id=user1['id'], role_id=self.role2['id']) self.put(up_entity['links']['assignment']) # Now list by various filters to make sure we get back the right ones collection_url = ('/role_assignments?scope.project.id=%s' % project1['id']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=2, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, gp_entity) collection_url = ('/role_assignments?scope.domain.id=%s' % self.domain['id']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=2, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, ud_entity) self.assertRoleAssignmentInListResponse(r, gd_entity) collection_url = '/role_assignments?user.id=%s' % user1['id'] r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=2, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, ud_entity) collection_url = '/role_assignments?group.id=%s' % group1['id'] r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=2, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gd_entity) self.assertRoleAssignmentInListResponse(r, gp_entity) collection_url = '/role_assignments?role.id=%s' % self.role1['id'] r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, 
expected_length=2, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gd_entity) self.assertRoleAssignmentInListResponse(r, gp_entity) # Let's try combining two filers together.... collection_url = ( '/role_assignments?user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % { 'user_id': user1['id'], 'project_id': project1['id']}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=1, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, up_entity) # Now for a harder one - filter for user with effective # roles - this should return role assignment that were directly # assigned as well as by virtue of group membership collection_url = ('/role_assignments?effective&user.id=%s' % user1['id']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=4, resource_url=collection_url) # Should have the two direct roles... self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, ud_entity) # ...and the two via group membership... gp1_link = self.build_role_assignment_link( project_id=project1['id'], group_id=group1['id'], role_id=self.role1['id']) gd1_link = self.build_role_assignment_link(domain_id=self.domain_id, group_id=group1['id'], role_id=self.role1['id']) up1_entity = self.build_role_assignment_entity( link=gp1_link, project_id=project1['id'], user_id=user1['id'], role_id=self.role1['id']) ud1_entity = self.build_role_assignment_entity( link=gd1_link, domain_id=self.domain_id, user_id=user1['id'], role_id=self.role1['id']) self.assertRoleAssignmentInListResponse(r, up1_entity) self.assertRoleAssignmentInListResponse(r, ud1_entity) # ...and for the grand-daddy of them all, simulate the request # that would generate the list of effective roles in a project # scoped token. 
collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % { 'user_id': user1['id'], 'project_id': project1['id']}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=2, resource_url=collection_url) # Should have one direct role and one from group membership... self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, up1_entity) class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin): """Base class for testing /v3/role_assignments API behavior.""" MAX_HIERARCHY_BREADTH = 3 MAX_HIERARCHY_DEPTH = CONF.max_project_tree_depth - 1 def load_sample_data(self): """Creates sample data to be used on tests. Created data are i) a role and ii) a domain containing: a project hierarchy and 3 users within 3 groups. """ def create_project_hierarchy(parent_id, depth): """Creates a random project hierarchy.""" if depth == 0: return breadth = random.randint(1, self.MAX_HIERARCHY_BREADTH) subprojects = [] for i in range(breadth): subprojects.append(unit.new_project_ref( domain_id=self.domain_id, parent_id=parent_id)) self.resource_api.create_project(subprojects[-1]['id'], subprojects[-1]) new_parent = subprojects[random.randint(0, breadth - 1)] create_project_hierarchy(new_parent['id'], depth - 1) super(RoleAssignmentBaseTestCase, self).load_sample_data() # Create a domain self.domain = unit.new_domain_ref() self.domain_id = self.domain['id'] self.resource_api.create_domain(self.domain_id, self.domain) # Create a project hierarchy self.project = unit.new_project_ref(domain_id=self.domain_id) self.project_id = self.project['id'] self.resource_api.create_project(self.project_id, self.project) # Create a random project hierarchy create_project_hierarchy(self.project_id, random.randint(1, self.MAX_HIERARCHY_DEPTH)) # Create 3 users self.user_ids = [] for i in range(3): user = unit.new_user_ref(domain_id=self.domain_id) user = 
self.identity_api.create_user(user) self.user_ids.append(user['id']) # Create 3 groups self.group_ids = [] for i in range(3): group = unit.new_group_ref(domain_id=self.domain_id) group = self.identity_api.create_group(group) self.group_ids.append(group['id']) # Put 2 members on each group self.identity_api.add_user_to_group(user_id=self.user_ids[i], group_id=group['id']) self.identity_api.add_user_to_group(user_id=self.user_ids[i % 2], group_id=group['id']) self.assignment_api.create_grant(user_id=self.user_id, project_id=self.project_id, role_id=self.role_id) # Create a role self.role = unit.new_role_ref() self.role_id = self.role['id'] self.role_api.create_role(self.role_id, self.role) # Set default user and group to be used on tests self.default_user_id = self.user_ids[0] self.default_group_id = self.group_ids[0] def get_role_assignments(self, expected_status=http_client.OK, **filters): """Returns the result from querying role assignment API + queried URL. Calls GET /v3/role_assignments? and returns its result, where is the HTTP query parameters form of effective option plus filters, if provided. Queried URL is returned as well. :returns: a tuple containing the list role assignments API response and queried URL. """ query_url = self._get_role_assignments_query_url(**filters) response = self.get(query_url, expected_status=expected_status) return (response, query_url) def _get_role_assignments_query_url(self, **filters): """Returns non-effective role assignments query URL from given filters. :param filters: query parameters are created with the provided filters on role assignments attributes. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: role assignments query URL. """ return self.build_role_assignment_query_url(**filters) class RoleAssignmentFailureTestCase(RoleAssignmentBaseTestCase): """Class for testing invalid query params on /v3/role_assignments API. 
Querying domain and project, or user and group results in a HTTP 400 Bad Request, since a role assignment must contain only a single pair of (actor, target). In addition, since filtering on role assignments applies only to the final result, effective mode cannot be combined with i) group or ii) domain and inherited, because it would always result in an empty list. """ def test_get_role_assignments_by_domain_and_project(self): self.get_role_assignments(domain_id=self.domain_id, project_id=self.project_id, expected_status=http_client.BAD_REQUEST) def test_get_role_assignments_by_user_and_group(self): self.get_role_assignments(user_id=self.default_user_id, group_id=self.default_group_id, expected_status=http_client.BAD_REQUEST) def test_get_role_assignments_by_effective_and_inherited(self): self.config_fixture.config(group='os_inherit', enabled=True) self.get_role_assignments(domain_id=self.domain_id, effective=True, inherited_to_projects=True, expected_status=http_client.BAD_REQUEST) def test_get_role_assignments_by_effective_and_group(self): self.get_role_assignments(effective=True, group_id=self.default_group_id, expected_status=http_client.BAD_REQUEST) class RoleAssignmentDirectTestCase(RoleAssignmentBaseTestCase): """Class for testing direct assignments on /v3/role_assignments API. Direct assignments on a domain or project have effect on them directly, instead of on their project hierarchy, i.e they are non-inherited. In addition, group direct assignments are not expanded to group's users. Tests on this class make assertions on the representation and API filtering of direct assignments. """ def _test_get_role_assignments(self, **filters): """Generic filtering test method. According to the provided filters, this method: - creates a new role assignment; - asserts that list role assignments API reponds correctly; - deletes the created role assignment. :param filters: filters to be considered when listing role assignments. 
Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. """ # Fills default assignment with provided filters test_assignment = self._set_default_assignment_attributes(**filters) # Create new role assignment for this test self.assignment_api.create_grant(**test_assignment) # Get expected role assignments expected_assignments = self._list_expected_role_assignments( **test_assignment) # Get role assignments from API response, query_url = self.get_role_assignments(**test_assignment) self.assertValidRoleAssignmentListResponse(response, resource_url=query_url) self.assertEqual(len(expected_assignments), len(response.result.get('role_assignments'))) # Assert that expected role assignments were returned by the API call for assignment in expected_assignments: self.assertRoleAssignmentInListResponse(response, assignment) # Delete created role assignment self.assignment_api.delete_grant(**test_assignment) def _set_default_assignment_attributes(self, **attribs): """Inserts default values for missing attributes of role assignment. If no actor, target or role are provided, they will default to values from sample data. :param attribs: info from a role assignment entity. Valid attributes are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. """ if not any(target in attribs for target in ('domain_id', 'projects_id')): attribs['project_id'] = self.project_id if not any(actor in attribs for actor in ('user_id', 'group_id')): attribs['user_id'] = self.default_user_id if 'role_id' not in attribs: attribs['role_id'] = self.role_id return attribs def _list_expected_role_assignments(self, **filters): """Given the filters, it returns expected direct role assignments. :param filters: filters that will be considered when listing role assignments. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: the list of the expected role assignments. 
""" return [self.build_role_assignment_entity(**filters)] # Test cases below call the generic test method, providing different filter # combinations. Filters are provided as specified in the method name, after # 'by'. For example, test_get_role_assignments_by_project_user_and_role # calls the generic test method with project_id, user_id and role_id. def test_get_role_assignments_by_domain(self, **filters): self._test_get_role_assignments(domain_id=self.domain_id, **filters) def test_get_role_assignments_by_project(self, **filters): self._test_get_role_assignments(project_id=self.project_id, **filters) def test_get_role_assignments_by_user(self, **filters): self._test_get_role_assignments(user_id=self.default_user_id, **filters) def test_get_role_assignments_by_group(self, **filters): self._test_get_role_assignments(group_id=self.default_group_id, **filters) def test_get_role_assignments_by_role(self, **filters): self._test_get_role_assignments(role_id=self.role_id, **filters) def test_get_role_assignments_by_domain_and_user(self, **filters): self.test_get_role_assignments_by_domain(user_id=self.default_user_id, **filters) def test_get_role_assignments_by_domain_and_group(self, **filters): self.test_get_role_assignments_by_domain( group_id=self.default_group_id, **filters) def test_get_role_assignments_by_project_and_user(self, **filters): self.test_get_role_assignments_by_project(user_id=self.default_user_id, **filters) def test_get_role_assignments_by_project_and_group(self, **filters): self.test_get_role_assignments_by_project( group_id=self.default_group_id, **filters) def test_get_role_assignments_by_domain_user_and_role(self, **filters): self.test_get_role_assignments_by_domain_and_user(role_id=self.role_id, **filters) def test_get_role_assignments_by_domain_group_and_role(self, **filters): self.test_get_role_assignments_by_domain_and_group( role_id=self.role_id, **filters) def test_get_role_assignments_by_project_user_and_role(self, **filters): 
self.test_get_role_assignments_by_project_and_user( role_id=self.role_id, **filters) def test_get_role_assignments_by_project_group_and_role(self, **filters): self.test_get_role_assignments_by_project_and_group( role_id=self.role_id, **filters) class RoleAssignmentInheritedTestCase(RoleAssignmentDirectTestCase): """Class for testing inherited assignments on /v3/role_assignments API. Inherited assignments on a domain or project have no effect on them directly, but on the projects under them instead. Tests on this class do not make assertions on the effect of inherited assignments, but in their representation and API filtering. """ def config_overrides(self): super(RoleAssignmentBaseTestCase, self).config_overrides() self.config_fixture.config(group='os_inherit', enabled=True) def _test_get_role_assignments(self, **filters): """Adds inherited_to_project filter to expected entity in tests.""" super(RoleAssignmentInheritedTestCase, self)._test_get_role_assignments(inherited_to_projects=True, **filters) class RoleAssignmentEffectiveTestCase(RoleAssignmentInheritedTestCase): """Class for testing inheritance effects on /v3/role_assignments API. Inherited assignments on a domain or project have no effect on them directly, but on the projects under them instead. Tests on this class make assertions on the effect of inherited assignments and API filtering. """ def _get_role_assignments_query_url(self, **filters): """Returns effective role assignments query URL from given filters. For test methods in this class, effetive will always be true. As in effective mode, inherited_to_projects, group_id, domain_id and project_id will always be desconsidered from provided filters. :param filters: query parameters are created with the provided filters. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: role assignments query URL. 
""" query_filters = filters.copy() query_filters.pop('inherited_to_projects') query_filters.pop('group_id', None) query_filters.pop('domain_id', None) query_filters.pop('project_id', None) return self.build_role_assignment_query_url(effective=True, **query_filters) def _list_expected_role_assignments(self, **filters): """Given the filters, it returns expected direct role assignments. :param filters: filters that will be considered when listing role assignments. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: the list of the expected role assignments. """ # Get assignment link, to be put on 'links': {'assignment': link} assignment_link = self.build_role_assignment_link(**filters) # Expand group membership user_ids = [None] if filters.get('group_id'): user_ids = [user['id'] for user in self.identity_api.list_users_in_group( filters['group_id'])] else: user_ids = [self.default_user_id] # Expand role inheritance project_ids = [None] if filters.get('domain_id'): project_ids = [project['id'] for project in self.resource_api.list_projects_in_domain( filters.pop('domain_id'))] else: project_ids = [project['id'] for project in self.resource_api.list_projects_in_subtree( self.project_id)] # Compute expected role assignments assignments = [] for project_id in project_ids: filters['project_id'] = project_id for user_id in user_ids: filters['user_id'] = user_id assignments.append(self.build_role_assignment_entity( link=assignment_link, **filters)) return assignments class AssignmentInheritanceTestCase(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin): """Test inheritance crud and its effects.""" def config_overrides(self): super(AssignmentInheritanceTestCase, self).config_overrides() self.config_fixture.config(group='os_inherit', enabled=True) def test_get_token_from_inherited_user_domain_role_grants(self): # Create a new user to ensure that no grant is loaded from sample data user = 
unit.create_user(self.identity_api, domain_id=self.domain_id) # Define domain and project authentication data domain_auth_data = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=self.domain_id) project_auth_data = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=self.project_id) # Check the user cannot get a domain nor a project token self.v3_create_token(domain_auth_data, expected_status=http_client.UNAUTHORIZED) self.v3_create_token(project_auth_data, expected_status=http_client.UNAUTHORIZED) # Grant non-inherited role for user on domain non_inher_ud_link = self.build_role_assignment_link( domain_id=self.domain_id, user_id=user['id'], role_id=self.role_id) self.put(non_inher_ud_link) # Check the user can get only a domain token self.v3_create_token(domain_auth_data) self.v3_create_token(project_auth_data, expected_status=http_client.UNAUTHORIZED) # Create inherited role inherited_role = unit.new_role_ref(name='inherited') self.role_api.create_role(inherited_role['id'], inherited_role) # Grant inherited role for user on domain inher_ud_link = self.build_role_assignment_link( domain_id=self.domain_id, user_id=user['id'], role_id=inherited_role['id'], inherited_to_projects=True) self.put(inher_ud_link) # Check the user can get both a domain and a project token self.v3_create_token(domain_auth_data) self.v3_create_token(project_auth_data) # Delete inherited grant self.delete(inher_ud_link) # Check the user can only get a domain token self.v3_create_token(domain_auth_data) self.v3_create_token(project_auth_data, expected_status=http_client.UNAUTHORIZED) # Delete non-inherited grant self.delete(non_inher_ud_link) # Check the user cannot get a domain token anymore self.v3_create_token(domain_auth_data, expected_status=http_client.UNAUTHORIZED) def test_get_token_from_inherited_group_domain_role_grants(self): # Create a new group and put a new user in it to # ensure that no grant is 
loaded from sample data user = unit.create_user(self.identity_api, domain_id=self.domain_id) group = unit.new_group_ref(domain_id=self.domain['id']) group = self.identity_api.create_group(group) self.identity_api.add_user_to_group(user['id'], group['id']) # Define domain and project authentication data domain_auth_data = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=self.domain_id) project_auth_data = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=self.project_id) # Check the user cannot get a domain nor a project token self.v3_create_token(domain_auth_data, expected_status=http_client.UNAUTHORIZED) self.v3_create_token(project_auth_data, expected_status=http_client.UNAUTHORIZED) # Grant non-inherited role for user on domain non_inher_gd_link = self.build_role_assignment_link( domain_id=self.domain_id, user_id=user['id'], role_id=self.role_id) self.put(non_inher_gd_link) # Check the user can get only a domain token self.v3_create_token(domain_auth_data) self.v3_create_token(project_auth_data, expected_status=http_client.UNAUTHORIZED) # Create inherited role inherited_role = unit.new_role_ref(name='inherited') self.role_api.create_role(inherited_role['id'], inherited_role) # Grant inherited role for user on domain inher_gd_link = self.build_role_assignment_link( domain_id=self.domain_id, user_id=user['id'], role_id=inherited_role['id'], inherited_to_projects=True) self.put(inher_gd_link) # Check the user can get both a domain and a project token self.v3_create_token(domain_auth_data) self.v3_create_token(project_auth_data) # Delete inherited grant self.delete(inher_gd_link) # Check the user can only get a domain token self.v3_create_token(domain_auth_data) self.v3_create_token(project_auth_data, expected_status=http_client.UNAUTHORIZED) # Delete non-inherited grant self.delete(non_inher_gd_link) # Check the user cannot get a domain token anymore 
self.v3_create_token(domain_auth_data, expected_status=http_client.UNAUTHORIZED) def _test_crud_inherited_and_direct_assignment_on_target(self, target_url): # Create a new role to avoid assignments loaded from sample data role = unit.new_role_ref() self.role_api.create_role(role['id'], role) # Define URLs direct_url = '%s/users/%s/roles/%s' % ( target_url, self.user_id, role['id']) inherited_url = '/OS-INHERIT/%s/inherited_to_projects' % direct_url # Create the direct assignment self.put(direct_url) # Check the direct assignment exists, but the inherited one does not self.head(direct_url) self.head(inherited_url, expected_status=http_client.NOT_FOUND) # Now add the inherited assignment self.put(inherited_url) # Check both the direct and inherited assignment exist self.head(direct_url) self.head(inherited_url) # Delete indirect assignment self.delete(inherited_url) # Check the direct assignment exists, but the inherited one does not self.head(direct_url) self.head(inherited_url, expected_status=http_client.NOT_FOUND) # Now delete the inherited assignment self.delete(direct_url) # Check that none of them exist self.head(direct_url, expected_status=http_client.NOT_FOUND) self.head(inherited_url, expected_status=http_client.NOT_FOUND) def test_crud_inherited_and_direct_assignment_on_domains(self): self._test_crud_inherited_and_direct_assignment_on_target( '/domains/%s' % self.domain_id) def test_crud_inherited_and_direct_assignment_on_projects(self): self._test_crud_inherited_and_direct_assignment_on_target( '/projects/%s' % self.project_id) def test_crud_user_inherited_domain_role_grants(self): role_list = [] for _ in range(2): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) # Create a non-inherited role as a spoiler self.assignment_api.create_grant( role_list[1]['id'], user_id=self.user['id'], domain_id=self.domain_id) base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': 
self.domain_id, 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[0]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) # Check we can read it back self.head(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[0], resource_url=collection_url) # Now delete and check its gone self.delete(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, expected_length=0, resource_url=collection_url) def test_list_role_assignments_for_inherited_domain_grants(self): """Call ``GET /role_assignments with inherited domain grants``. Test Plan: - Create 4 roles - Create a domain with a user and two projects - Assign two direct roles to project1 - Assign a spoiler role to project2 - Issue the URL to add inherited role to the domain - Issue the URL to check it is indeed on the domain - Issue the URL to check effective roles on project1 - this should return 3 roles. 
""" role_list = [] for _ in range(4): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user(self.identity_api, domain_id=domain['id']) project1 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project2['id'], project2) # Add some roles to the project self.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id']) self.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[1]['id']) # ..and one on a different project as a spoiler self.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[2]['id']) # Now create our inherited role on the domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': domain['id'], 'user_id': user1['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[3]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[3], resource_url=collection_url) # Now use the list domain role assignments api to check if this # is included collection_url = ( '/role_assignments?user.id=%(user_id)s' '&scope.domain.id=%(domain_id)s' % { 'user_id': user1['id'], 'domain_id': domain['id']}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=1, resource_url=collection_url) ud_entity = self.build_role_assignment_entity( domain_id=domain['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, ud_entity) # Now ask 
for effective list role assignments - the role should # turn into a project role, along with the two direct roles that are # on the project collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % { 'user_id': user1['id'], 'project_id': project1['id']}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=3, resource_url=collection_url) # An effective role for an inherited role will be a project # entity, with a domain link to the inherited assignment ud_url = self.build_role_assignment_link( domain_id=domain['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) up_entity = self.build_role_assignment_entity( link=ud_url, project_id=project1['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, up_entity) def test_list_role_assignments_include_names(self): """Call ``GET /role_assignments with include names``. 
Test Plan: - Create a domain with a group and a user - Create a project with a group and a user """ role1 = unit.new_role_ref() self.role_api.create_role(role1['id'], role1) user1 = unit.create_user(self.identity_api, domain_id=self.domain_id) group = unit.new_group_ref(domain_id=self.domain_id) group = self.identity_api.create_group(group) project1 = unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(project1['id'], project1) expected_entity1 = self.build_role_assignment_entity_include_names( role_ref=role1, project_ref=project1, user_ref=user1) self.put(expected_entity1['links']['assignment']) expected_entity2 = self.build_role_assignment_entity_include_names( role_ref=role1, domain_ref=self.domain, group_ref=group) self.put(expected_entity2['links']['assignment']) expected_entity3 = self.build_role_assignment_entity_include_names( role_ref=role1, domain_ref=self.domain, user_ref=user1) self.put(expected_entity3['links']['assignment']) expected_entity4 = self.build_role_assignment_entity_include_names( role_ref=role1, project_ref=project1, group_ref=group) self.put(expected_entity4['links']['assignment']) collection_url_domain = ( '/role_assignments?include_names&scope.domain.id=%(domain_id)s' % { 'domain_id': self.domain_id}) rs_domain = self.get(collection_url_domain) collection_url_project = ( '/role_assignments?include_names&' 'scope.project.id=%(project_id)s' % { 'project_id': project1['id']}) rs_project = self.get(collection_url_project) collection_url_group = ( '/role_assignments?include_names&group.id=%(group_id)s' % { 'group_id': group['id']}) rs_group = self.get(collection_url_group) collection_url_user = ( '/role_assignments?include_names&user.id=%(user_id)s' % { 'user_id': user1['id']}) rs_user = self.get(collection_url_user) collection_url_role = ( '/role_assignments?include_names&role.id=%(role_id)s' % { 'role_id': role1['id']}) rs_role = self.get(collection_url_role) # Make sure all entities were created successfully 
self.assertEqual(rs_domain.status_int, http_client.OK) self.assertEqual(rs_project.status_int, http_client.OK) self.assertEqual(rs_group.status_int, http_client.OK) self.assertEqual(rs_user.status_int, http_client.OK) # Make sure we can get back the correct number of entities self.assertValidRoleAssignmentListResponse( rs_domain, expected_length=2, resource_url=collection_url_domain) self.assertValidRoleAssignmentListResponse( rs_project, expected_length=2, resource_url=collection_url_project) self.assertValidRoleAssignmentListResponse( rs_group, expected_length=2, resource_url=collection_url_group) self.assertValidRoleAssignmentListResponse( rs_user, expected_length=2, resource_url=collection_url_user) self.assertValidRoleAssignmentListResponse( rs_role, expected_length=4, resource_url=collection_url_role) # Verify all types of entities have the correct format self.assertRoleAssignmentInListResponse(rs_domain, expected_entity2) self.assertRoleAssignmentInListResponse(rs_project, expected_entity1) self.assertRoleAssignmentInListResponse(rs_group, expected_entity4) self.assertRoleAssignmentInListResponse(rs_user, expected_entity3) self.assertRoleAssignmentInListResponse(rs_role, expected_entity1) def test_list_role_assignments_for_disabled_inheritance_extension(self): """Call ``GET /role_assignments with inherited domain grants``. Test Plan: - Issue the URL to add inherited role to the domain - Issue the URL to check effective roles on project include the inherited role - Disable the extension - Re-check the effective roles, proving the inherited role no longer shows up. 
""" role_list = [] for _ in range(4): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user(self.identity_api, domain_id=domain['id']) project1 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project2['id'], project2) # Add some roles to the project self.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id']) self.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[1]['id']) # ..and one on a different project as a spoiler self.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[2]['id']) # Now create our inherited role on the domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': domain['id'], 'user_id': user1['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[3]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[3], resource_url=collection_url) # Get effective list role assignments - the role should # turn into a project role, along with the two direct roles that are # on the project collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % { 'user_id': user1['id'], 'project_id': project1['id']}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=3, resource_url=collection_url) ud_url = self.build_role_assignment_link( domain_id=domain['id'], user_id=user1['id'], role_id=role_list[3]['id'], 
inherited_to_projects=True) up_entity = self.build_role_assignment_entity( link=ud_url, project_id=project1['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, up_entity) # Disable the extension and re-check the list, the role inherited # from the project should no longer show up self.config_fixture.config(group='os_inherit', enabled=False) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=2, resource_url=collection_url) self.assertRoleAssignmentNotInListResponse(r, up_entity) def test_list_role_assignments_for_inherited_group_domain_grants(self): """Call ``GET /role_assignments with inherited group domain grants``. Test Plan: - Create 4 roles - Create a domain with a user and two projects - Assign two direct roles to project1 - Assign a spoiler role to project2 - Issue the URL to add inherited role to the domain - Issue the URL to check it is indeed on the domain - Issue the URL to check effective roles on project1 - this should return 3 roles. 
""" role_list = [] for _ in range(4): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user(self.identity_api, domain_id=domain['id']) user2 = unit.create_user(self.identity_api, domain_id=domain['id']) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = self.identity_api.create_group(group1) self.identity_api.add_user_to_group(user1['id'], group1['id']) self.identity_api.add_user_to_group(user2['id'], group1['id']) project1 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project2['id'], project2) # Add some roles to the project self.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id']) self.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[1]['id']) # ..and one on a different project as a spoiler self.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[2]['id']) # Now create our inherited role on the domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % { 'domain_id': domain['id'], 'group_id': group1['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[3]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[3], resource_url=collection_url) # Now use the list domain role assignments api to check if this # is included collection_url = ( '/role_assignments?group.id=%(group_id)s' '&scope.domain.id=%(domain_id)s' % { 'group_id': group1['id'], 'domain_id': domain['id']}) r = self.get(collection_url) 
self.assertValidRoleAssignmentListResponse(r, expected_length=1, resource_url=collection_url) gd_entity = self.build_role_assignment_entity( domain_id=domain['id'], group_id=group1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, gd_entity) # Now ask for effective list role assignments - the role should # turn into a user project role, along with the two direct roles # that are on the project collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % { 'user_id': user1['id'], 'project_id': project1['id']}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=3, resource_url=collection_url) # An effective role for an inherited role will be a project # entity, with a domain link to the inherited assignment up_entity = self.build_role_assignment_entity( link=gd_entity['links']['assignment'], project_id=project1['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, up_entity) def test_filtered_role_assignments_for_inherited_grants(self): """Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``. Test Plan: - Create 5 roles - Create a domain with a user, group and two projects - Assign three direct spoiler roles to projects - Issue the URL to add an inherited user role to the domain - Issue the URL to add an inherited group role to the domain - Issue the URL to filter by inherited roles - this should return just the 2 inherited roles. 
""" role_list = [] for _ in range(5): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user(self.identity_api, domain_id=domain['id']) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = self.identity_api.create_group(group1) project1 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project2['id'], project2) # Add some spoiler roles to the projects self.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id']) self.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[1]['id']) # Create a non-inherited role as a spoiler self.assignment_api.create_grant( role_list[2]['id'], user_id=user1['id'], domain_id=domain['id']) # Now create two inherited roles on the domain, one for a user # and one for a domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': domain['id'], 'user_id': user1['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[3]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[3], resource_url=collection_url) base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % { 'domain_id': domain['id'], 'group_id': group1['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[4]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) r = 
self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[4], resource_url=collection_url) # Now use the list role assignments api to get a list of inherited # roles on the domain - should get back the two roles collection_url = ( '/role_assignments?scope.OS-INHERIT:inherited_to=projects') r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=2, resource_url=collection_url) ud_entity = self.build_role_assignment_entity( domain_id=domain['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) gd_entity = self.build_role_assignment_entity( domain_id=domain['id'], group_id=group1['id'], role_id=role_list[4]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, ud_entity) self.assertRoleAssignmentInListResponse(r, gd_entity) def _setup_hierarchical_projects_scenario(self): """Creates basic hierarchical projects scenario. This basic scenario contains a root with one leaf project and two roles with the following names: non-inherited and inherited. 
""" # Create project hierarchy root = unit.new_project_ref(domain_id=self.domain['id']) leaf = unit.new_project_ref(domain_id=self.domain['id'], parent_id=root['id']) self.resource_api.create_project(root['id'], root) self.resource_api.create_project(leaf['id'], leaf) # Create 'non-inherited' and 'inherited' roles non_inherited_role = unit.new_role_ref(name='non-inherited') self.role_api.create_role(non_inherited_role['id'], non_inherited_role) inherited_role = unit.new_role_ref(name='inherited') self.role_api.create_role(inherited_role['id'], inherited_role) return (root['id'], leaf['id'], non_inherited_role['id'], inherited_role['id']) def test_get_token_from_inherited_user_project_role_grants(self): # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Define root and leaf projects authentication data root_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=root_id) leaf_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=leaf_id) # Check the user cannot get a token on root nor leaf project self.v3_create_token(root_project_auth_data, expected_status=http_client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data, expected_status=http_client.UNAUTHORIZED) # Grant non-inherited role for user on leaf project non_inher_up_link = self.build_role_assignment_link( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_up_link) # Check the user can only get a token on leaf project self.v3_create_token(root_project_auth_data, expected_status=http_client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data) # Grant inherited role for user on root project inher_up_link = self.build_role_assignment_link( project_id=root_id, user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True) 
self.put(inher_up_link) # Check the user still can get a token only on leaf project self.v3_create_token(root_project_auth_data, expected_status=http_client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data) # Delete non-inherited grant self.delete(non_inher_up_link) # Check the inherited role still applies for leaf project self.v3_create_token(root_project_auth_data, expected_status=http_client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data) # Delete inherited grant self.delete(inher_up_link) # Check the user cannot get a token on leaf project anymore self.v3_create_token(leaf_project_auth_data, expected_status=http_client.UNAUTHORIZED) def test_get_token_from_inherited_group_project_role_grants(self): # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Create group and add user to it group = unit.new_group_ref(domain_id=self.domain['id']) group = self.identity_api.create_group(group) self.identity_api.add_user_to_group(self.user['id'], group['id']) # Define root and leaf projects authentication data root_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=root_id) leaf_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=leaf_id) # Check the user cannot get a token on root nor leaf project self.v3_create_token(root_project_auth_data, expected_status=http_client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data, expected_status=http_client.UNAUTHORIZED) # Grant non-inherited role for group on leaf project non_inher_gp_link = self.build_role_assignment_link( project_id=leaf_id, group_id=group['id'], role_id=non_inherited_role_id) self.put(non_inher_gp_link) # Check the user can only get a token on leaf project self.v3_create_token(root_project_auth_data, expected_status=http_client.UNAUTHORIZED) 
self.v3_create_token(leaf_project_auth_data) # Grant inherited role for group on root project inher_gp_link = self.build_role_assignment_link( project_id=root_id, group_id=group['id'], role_id=inherited_role_id, inherited_to_projects=True) self.put(inher_gp_link) # Check the user still can get a token only on leaf project self.v3_create_token(root_project_auth_data, expected_status=http_client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data) # Delete no-inherited grant self.delete(non_inher_gp_link) # Check the inherited role still applies for leaf project self.v3_create_token(leaf_project_auth_data) # Delete inherited grant self.delete(inher_gp_link) # Check the user cannot get a token on leaf project anymore self.v3_create_token(leaf_project_auth_data, expected_status=http_client.UNAUTHORIZED) def test_get_role_assignments_for_project_hierarchy(self): """Call ``GET /role_assignments``. Test Plan: - Create 2 roles - Create a hierarchy of projects with one root and one leaf project - Issue the URL to add a non-inherited user role to the root project - Issue the URL to add an inherited user role to the root project - Issue the URL to get all role assignments - this should return just 2 roles (non-inherited and inherited) in the root project. 
""" # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Grant non-inherited role non_inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_up_entity['links']['assignment']) # Grant inherited role inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True) self.put(inher_up_entity['links']['assignment']) # Get role assignments collection_url = '/role_assignments' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, resource_url=collection_url) # Assert that the user has non-inherited role on root project self.assertRoleAssignmentInListResponse(r, non_inher_up_entity) # Assert that the user has inherited role on root project self.assertRoleAssignmentInListResponse(r, inher_up_entity) # Assert that the user does not have non-inherited role on leaf project non_inher_up_entity = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user does not have inherited role on leaf project inher_up_entity['scope']['project']['id'] = leaf_id self.assertRoleAssignmentNotInListResponse(r, inher_up_entity) def test_get_effective_role_assignments_for_project_hierarchy(self): """Call ``GET /role_assignments?effective``. Test Plan: - Create 2 roles - Create a hierarchy of projects with one root and one leaf project - Issue the URL to add a non-inherited user role to the root project - Issue the URL to add an inherited user role to the root project - Issue the URL to get effective role assignments - this should return 1 role (non-inherited) on the root project and 1 role (inherited) on the leaf project. 
""" # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Grant non-inherited role non_inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_up_entity['links']['assignment']) # Grant inherited role inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True) self.put(inher_up_entity['links']['assignment']) # Get effective role assignments collection_url = '/role_assignments?effective' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, resource_url=collection_url) # Assert that the user has non-inherited role on root project self.assertRoleAssignmentInListResponse(r, non_inher_up_entity) # Assert that the user does not have inherited role on root project self.assertRoleAssignmentNotInListResponse(r, inher_up_entity) # Assert that the user does not have non-inherited role on leaf project non_inher_up_entity = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user has inherited role on leaf project inher_up_entity['scope']['project']['id'] = leaf_id self.assertRoleAssignmentInListResponse(r, inher_up_entity) def test_project_id_specified_if_include_subtree_specified(self): """When using include_subtree, you must specify a project ID.""" self.get('/role_assignments?include_subtree=True', expected_status=http_client.BAD_REQUEST) self.get('/role_assignments?scope.project.id&' 'include_subtree=True', expected_status=http_client.BAD_REQUEST) def test_get_role_assignments_for_project_tree(self): """Get role_assignment?scope.project.id=X?include_subtree``. 
Test Plan: - Create 2 roles and a hierarchy of projects with one root and one leaf - Issue the URL to add a non-inherited user role to the root project and the leaf project - Issue the URL to get role assignments for the root project but not the subtree - this should return just the root assignment - Issue the URL to get role assignments for the root project and it's subtree - this should return both assignments - Check that explicitly setting include_subtree to False is the equivalent to not including it at all in the query. """ # Create default scenario root_id, leaf_id, non_inherited_role_id, unused_role_id = ( self._setup_hierarchical_projects_scenario()) # Grant non-inherited role to root and leaf projects non_inher_entity_root = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_entity_root['links']['assignment']) non_inher_entity_leaf = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_entity_leaf['links']['assignment']) # Without the subtree, we should get the one assignment on the # root project collection_url = ( '/role_assignments?scope.project.id=%(project)s' % { 'project': root_id}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url) self.assertThat(r.result['role_assignments'], matchers.HasLength(1)) self.assertRoleAssignmentInListResponse(r, non_inher_entity_root) # With the subtree, we should get both assignments collection_url = ( '/role_assignments?scope.project.id=%(project)s' '&include_subtree=True' % { 'project': root_id}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url) self.assertThat(r.result['role_assignments'], matchers.HasLength(2)) self.assertRoleAssignmentInListResponse(r, non_inher_entity_root) self.assertRoleAssignmentInListResponse(r, non_inher_entity_leaf) # With 
subtree=0, we should also only get the one assignment on the # root project collection_url = ( '/role_assignments?scope.project.id=%(project)s' '&include_subtree=0' % { 'project': root_id}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url) self.assertThat(r.result['role_assignments'], matchers.HasLength(1)) self.assertRoleAssignmentInListResponse(r, non_inher_entity_root) def test_get_effective_role_assignments_for_project_tree(self): """Get role_assignment ?project_id=X?include_subtree=True?effective``. Test Plan: - Create 2 roles and a hierarchy of projects with one root and 4 levels of child project - Issue the URL to add a non-inherited user role to the root project and a level 1 project - Issue the URL to add an inherited user role on the level 2 project - Issue the URL to get effective role assignments for the level 1 project and it's subtree - this should return a role (non-inherited) on the level 1 project and roles (inherited) on each of the level 2, 3 and 4 projects """ # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Add some extra projects to the project hierarchy level2 = unit.new_project_ref(domain_id=self.domain['id'], parent_id=leaf_id) level3 = unit.new_project_ref(domain_id=self.domain['id'], parent_id=level2['id']) level4 = unit.new_project_ref(domain_id=self.domain['id'], parent_id=level3['id']) self.resource_api.create_project(level2['id'], level2) self.resource_api.create_project(level3['id'], level3) self.resource_api.create_project(level4['id'], level4) # Grant non-inherited role to root (as a spoiler) and to # the level 1 (leaf) project non_inher_entity_root = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_entity_root['links']['assignment']) non_inher_entity_leaf = self.build_role_assignment_entity( project_id=leaf_id, 
user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_entity_leaf['links']['assignment']) # Grant inherited role to level 2 inher_entity = self.build_role_assignment_entity( project_id=level2['id'], user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True) self.put(inher_entity['links']['assignment']) # Get effective role assignments collection_url = ( '/role_assignments?scope.project.id=%(project)s' '&include_subtree=True&effective' % { 'project': leaf_id}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url) # There should be three assignments returned in total self.assertThat(r.result['role_assignments'], matchers.HasLength(3)) # Assert that the user does not non-inherited role on root project self.assertRoleAssignmentNotInListResponse(r, non_inher_entity_root) # Assert that the user does have non-inherited role on leaf project self.assertRoleAssignmentInListResponse(r, non_inher_entity_leaf) # Assert that the user has inherited role on levels 3 and 4 inher_entity['scope']['project']['id'] = level3['id'] self.assertRoleAssignmentInListResponse(r, inher_entity) inher_entity['scope']['project']['id'] = level4['id'] self.assertRoleAssignmentInListResponse(r, inher_entity) def test_get_inherited_role_assignments_for_project_hierarchy(self): """Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``. Test Plan: - Create 2 roles - Create a hierarchy of projects with one root and one leaf project - Issue the URL to add a non-inherited user role to the root project - Issue the URL to add an inherited user role to the root project - Issue the URL to filter inherited to projects role assignments - this should return 1 role (inherited) on the root project. 
""" # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Grant non-inherited role non_inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_up_entity['links']['assignment']) # Grant inherited role inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True) self.put(inher_up_entity['links']['assignment']) # Get inherited role assignments collection_url = ('/role_assignments' '?scope.OS-INHERIT:inherited_to=projects') r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, resource_url=collection_url) # Assert that the user does not have non-inherited role on root project self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user has inherited role on root project self.assertRoleAssignmentInListResponse(r, inher_up_entity) # Assert that the user does not have non-inherited role on leaf project non_inher_up_entity = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user does not have inherited role on leaf project inher_up_entity['scope']['project']['id'] = leaf_id self.assertRoleAssignmentNotInListResponse(r, inher_up_entity) class AssignmentInheritanceDisabledTestCase(test_v3.RestfulTestCase): """Test inheritance crud and its effects.""" def config_overrides(self): super(AssignmentInheritanceDisabledTestCase, self).config_overrides() self.config_fixture.config(group='os_inherit', enabled=False) def test_crud_inherited_role_grants_failed_if_disabled(self): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' 
% { 'domain_id': self.domain_id, 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url, expected_status=http_client.NOT_FOUND) self.head(member_url, expected_status=http_client.NOT_FOUND) self.get(collection_url, expected_status=http_client.NOT_FOUND) self.delete(member_url, expected_status=http_client.NOT_FOUND) class ImpliedRolesTests(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin, unit.TestCase): def _create_role(self): """Call ``POST /roles``.""" ref = unit.new_role_ref() r = self.post('/roles', body={'role': ref}) return self.assertValidRoleResponse(r, ref) def test_list_implied_roles_none(self): self.prior = self._create_role() url = '/roles/%s/implies' % (self.prior['id']) response = self.get(url).json["role_inference"] self.assertEqual(self.prior['id'], response['prior_role']['id']) self.assertEqual(0, len(response['implies'])) def _create_implied_role(self, prior, implied): self.put('/roles/%s/implies/%s' % (prior['id'], implied['id']), expected_status=http_client.CREATED) def _delete_implied_role(self, prior, implied): self.delete('/roles/%s/implies/%s' % (prior['id'], implied['id'])) def _setup_prior_two_implied(self): self.prior = self._create_role() self.implied1 = self._create_role() self._create_implied_role(self.prior, self.implied1) self.implied2 = self._create_role() self._create_implied_role(self.prior, self.implied2) def _assert_expected_implied_role_response( self, expected_prior_id, expected_implied_ids): r = self.get('/roles/%s/implies' % expected_prior_id) response = r.json["role_inference"] self.assertEqual(expected_prior_id, response['prior_role']['id']) actual_implied_ids = [implied['id'] for implied in response['implies']] for expected_id in expected_implied_ids: self.assertIn(expected_id, actual_implied_ids) 
self.assertEqual(len(expected_implied_ids), len(response['implies'])) self.assertIsNotNone(response['prior_role']['links']['self']) for implied in response['implies']: self.assertIsNotNone(implied['links']['self']) def _assert_two_roles_implied(self): self._assert_expected_implied_role_response( self.prior['id'], [self.implied1['id'], self.implied2['id']]) def _assert_one_role_implied(self): self._assert_expected_implied_role_response( self.prior['id'], [self.implied1['id']]) self.get('/roles/%s/implies/%s' % (self.prior['id'], self.implied2['id']), expected_status=http_client.NOT_FOUND) def _assert_two_rules_defined(self): r = self.get('/role_inferences/') rules = r.result['role_inferences'] self.assertEqual(self.prior['id'], rules[0]['prior_role']['id']) self.assertEqual(2, len(rules[0]['implies'])) implied_ids = [implied['id'] for implied in rules[0]['implies']] implied_names = [implied['name'] for implied in rules[0]['implies']] self.assertIn(self.implied1['id'], implied_ids) self.assertIn(self.implied2['id'], implied_ids) self.assertIn(self.implied1['name'], implied_names) self.assertIn(self.implied2['name'], implied_names) def _assert_one_rule_defined(self): r = self.get('/role_inferences/') rules = r.result['role_inferences'] self.assertEqual(self.prior['id'], rules[0]['prior_role']['id']) self.assertEqual(self.implied1['id'], rules[0]['implies'][0]['id']) self.assertEqual(self.implied1['name'], rules[0]['implies'][0]['name']) self.assertEqual(1, len(rules[0]['implies'])) def test_list_all_rules(self): self._setup_prior_two_implied() self._assert_two_rules_defined() self._delete_implied_role(self.prior, self.implied2) self._assert_one_rule_defined() def test_CRD_implied_roles(self): self._setup_prior_two_implied() self._assert_two_roles_implied() self._delete_implied_role(self.prior, self.implied2) self._assert_one_role_implied() def _create_three_roles(self): self.role_list = [] for _ in range(3): role = unit.new_role_ref() 
self.role_api.create_role(role['id'], role) self.role_list.append(role) def _create_test_domain_user_project(self): domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user = unit.create_user(self.identity_api, domain_id=domain['id']) project = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project['id'], project) return domain, user, project def _assign_top_role_to_user_on_project(self, user, project): self.assignment_api.add_role_to_user_and_project( user['id'], project['id'], self.role_list[0]['id']) def _build_effective_role_assignments_url(self, user): return '/role_assignments?effective&user.id=%(user_id)s' % { 'user_id': user['id']} def _assert_all_roles_in_assignment(self, response, user): # Now use the list role assignments api to check that all three roles # appear in the collection self.assertValidRoleAssignmentListResponse( response, expected_length=len(self.role_list), resource_url=self._build_effective_role_assignments_url(user)) def _assert_initial_assignment_in_effective(self, response, user, project): # The initial assignment should be there (the link url will be # generated and checked automatically since it matches the assignment) entity = self.build_role_assignment_entity( project_id=project['id'], user_id=user['id'], role_id=self.role_list[0]['id']) self.assertRoleAssignmentInListResponse(response, entity) def _assert_effective_role_for_implied_has_prior_in_links( self, response, user, project, prior_index, implied_index): # An effective role for an implied role will have the prior role # assignment in the links prior_link = '/prior_roles/%(prior)s/implies/%(implied)s' % { 'prior': self.role_list[prior_index]['id'], 'implied': self.role_list[implied_index]['id']} link = self.build_role_assignment_link( project_id=project['id'], user_id=user['id'], role_id=self.role_list[prior_index]['id']) entity = self.build_role_assignment_entity( link=link, project_id=project['id'], 
user_id=user['id'], role_id=self.role_list[implied_index]['id'], prior_link=prior_link) self.assertRoleAssignmentInListResponse(response, entity) def test_list_role_assignments_with_implied_roles(self): """Call ``GET /role_assignments`` with implied role grant. Test Plan: - Create a domain with a user and a project - Create 3 roles - Role 0 implies role 1 and role 1 implies role 2 - Assign the top role to the project - Issue the URL to check effective roles on project - this should return all 3 roles. - Check the links of the 3 roles indicate the prior role where appropriate """ (domain, user, project) = self._create_test_domain_user_project() self._create_three_roles() self._create_implied_role(self.role_list[0], self.role_list[1]) self._create_implied_role(self.role_list[1], self.role_list[2]) self._assign_top_role_to_user_on_project(user, project) response = self.get(self._build_effective_role_assignments_url(user)) r = response self._assert_all_roles_in_assignment(r, user) self._assert_initial_assignment_in_effective(response, user, project) self._assert_effective_role_for_implied_has_prior_in_links( response, user, project, 0, 1) self._assert_effective_role_for_implied_has_prior_in_links( response, user, project, 1, 2) def _create_named_role(self, name): role = unit.new_role_ref() role['name'] = name self.role_api.create_role(role['id'], role) return role def test_root_role_as_implied_role_forbidden(self): """Test root role is forbidden to be set as an implied role. Create 2 roles that are prohibited from being an implied role. Create 1 additional role which should be accepted as an implied role. Assure the prohibited role names cannot be set as an implied role. Assure the accepted role name which is not a member of the prohibited implied role list can be successfully set an implied role. 
""" prohibited_name1 = 'root1' prohibited_name2 = 'root2' accepted_name1 = 'implied1' prohibited_names = [prohibited_name1, prohibited_name2] self.config_fixture.config(group='assignment', prohibited_implied_role=prohibited_names) prior_role = self._create_role() prohibited_role1 = self._create_named_role(prohibited_name1) url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format( prior_role_id=prior_role['id'], implied_role_id=prohibited_role1['id']) self.put(url, expected_status=http_client.FORBIDDEN) prohibited_role2 = self._create_named_role(prohibited_name2) url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format( prior_role_id=prior_role['id'], implied_role_id=prohibited_role2['id']) self.put(url, expected_status=http_client.FORBIDDEN) accepted_role1 = self._create_named_role(accepted_name1) url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format( prior_role_id=prior_role['id'], implied_role_id=accepted_role1['id']) self.put(url, expected_status=http_client.CREATED) def test_trusts_from_implied_role(self): self._create_three_roles() self._create_implied_role(self.role_list[0], self.role_list[1]) self._create_implied_role(self.role_list[1], self.role_list[2]) self._assign_top_role_to_user_on_project(self.user, self.project) # Create a trustee and assign the prior role to her trustee = unit.create_user(self.identity_api, domain_id=self.domain_id) ref = unit.new_trust_ref( trustor_user_id=self.user['id'], trustee_user_id=trustee['id'], project_id=self.project['id'], role_ids=[self.role_list[0]['id']]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = r.result['trust'] # Only the role that was specified is in the trust, NOT implied roles self.assertEqual(self.role_list[0]['id'], trust['roles'][0]['id']) self.assertThat(trust['roles'], matchers.HasLength(1)) # Authenticate as the trustee auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'], trust_id=trust['id']) r = 
self.v3_create_token(auth_data) token = r.result['token'] self.assertThat(token['roles'], matchers.HasLength(len(self.role_list))) for role in token['roles']: self.assertIn(role, self.role_list) for role in self.role_list: self.assertIn(role, token['roles']) def test_trusts_from_domain_specific_implied_role(self): self._create_three_roles() # Overwrite the first role with a domain specific role role = unit.new_role_ref(domain_id=self.domain_id) self.role_list[0] = self.role_api.create_role(role['id'], role) self._create_implied_role(self.role_list[0], self.role_list[1]) self._create_implied_role(self.role_list[1], self.role_list[2]) self._assign_top_role_to_user_on_project(self.user, self.project) # Create a trustee and assign the prior role to her trustee = unit.create_user(self.identity_api, domain_id=self.domain_id) ref = unit.new_trust_ref( trustor_user_id=self.user['id'], trustee_user_id=trustee['id'], project_id=self.project['id'], role_ids=[self.role_list[0]['id']]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = r.result['trust'] # Only the role that was specified is in the trust, NOT implied roles self.assertEqual(self.role_list[0]['id'], trust['roles'][0]['id']) self.assertThat(trust['roles'], matchers.HasLength(1)) # Authenticate as the trustee auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'], trust_id=trust['id']) r = self.v3_create_token(auth_data) token = r.result['token'] # The token should have the roles implies by the domain specific role, # but not the domain specific role itself. 
self.assertThat(token['roles'], matchers.HasLength(len(self.role_list) - 1)) for role in token['roles']: self.assertIn(role, self.role_list) for role in [self.role_list[1], self.role_list[2]]: self.assertIn(role, token['roles']) self.assertNotIn(self.role_list[0], token['roles']) class DomainSpecificRoleTests(test_v3.RestfulTestCase, unit.TestCase): def setUp(self): def create_role(domain_id=None): """Call ``POST /roles``.""" ref = unit.new_role_ref(domain_id=domain_id) r = self.post( '/roles', body={'role': ref}) return self.assertValidRoleResponse(r, ref) super(DomainSpecificRoleTests, self).setUp() self.domainA = unit.new_domain_ref() self.resource_api.create_domain(self.domainA['id'], self.domainA) self.domainB = unit.new_domain_ref() self.resource_api.create_domain(self.domainB['id'], self.domainB) self.global_role1 = create_role() self.global_role2 = create_role() # Since there maybe other global roles already created, let's count # them, so we can ensure we can check subsequent list responses # are correct r = self.get('/roles') self.existing_global_roles = len(r.result['roles']) # And now create some domain specific roles self.domainA_role1 = create_role(domain_id=self.domainA['id']) self.domainA_role2 = create_role(domain_id=self.domainA['id']) self.domainB_role = create_role(domain_id=self.domainB['id']) def test_get_and_list_domain_specific_roles(self): # Check we can get a domain specific role r = self.get('/roles/%s' % self.domainA_role1['id']) self.assertValidRoleResponse(r, self.domainA_role1) # If we list without specifying a domain, we should only get global # roles back. 
r = self.get('/roles') self.assertValidRoleListResponse( r, expected_length=self.existing_global_roles) self.assertRoleInListResponse(r, self.global_role1) self.assertRoleInListResponse(r, self.global_role2) self.assertRoleNotInListResponse(r, self.domainA_role1) self.assertRoleNotInListResponse(r, self.domainA_role2) self.assertRoleNotInListResponse(r, self.domainB_role) # Now list those in domainA, making sure that's all we get back r = self.get('/roles?domain_id=%s' % self.domainA['id']) self.assertValidRoleListResponse(r, expected_length=2) self.assertRoleInListResponse(r, self.domainA_role1) self.assertRoleInListResponse(r, self.domainA_role2) def test_update_domain_specific_roles(self): self.domainA_role1['name'] = uuid.uuid4().hex self.patch('/roles/%(role_id)s' % { 'role_id': self.domainA_role1['id']}, body={'role': self.domainA_role1}) r = self.get('/roles/%s' % self.domainA_role1['id']) self.assertValidRoleResponse(r, self.domainA_role1) def test_delete_domain_specific_roles(self): # Check delete only removes that one domain role self.delete('/roles/%(role_id)s' % { 'role_id': self.domainA_role1['id']}) self.get('/roles/%s' % self.domainA_role1['id'], expected_status=http_client.NOT_FOUND) # Now re-list those in domainA, making sure there's only one left r = self.get('/roles?domain_id=%s' % self.domainA['id']) self.assertValidRoleListResponse(r, expected_length=1) self.assertRoleInListResponse(r, self.domainA_role2) class ListUserProjectsTestCase(test_v3.RestfulTestCase): """Tests for /users//projects""" def load_sample_data(self): # do not load base class's data, keep it focused on the tests self.auths = [] self.domains = [] self.projects = [] self.roles = [] self.users = [] # Create 3 sets of domain, roles, projects, and users to demonstrate # the right user's data is loaded and only projects they can access # are returned. 
for _ in range(3): domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user = unit.create_user(self.identity_api, domain_id=domain['id']) role = unit.new_role_ref() self.role_api.create_role(role['id'], role) self.assignment_api.create_grant(role['id'], user_id=user['id'], domain_id=domain['id']) project = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project['id'], project) self.assignment_api.create_grant(role['id'], user_id=user['id'], project_id=project['id']) auth = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=domain['id']) self.auths.append(auth) self.domains.append(domain) self.projects.append(project) self.roles.append(role) self.users.append(user) def test_list_all(self): for i in range(len(self.users)): user = self.users[i] auth = self.auths[i] url = '/users/%s/projects' % user['id'] result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, len(projects_result)) self.assertEqual(self.projects[i]['id'], projects_result[0]['id']) def test_list_enabled(self): for i in range(len(self.users)): user = self.users[i] auth = self.auths[i] # There are no disabled projects url = '/users/%s/projects?enabled=True' % user['id'] result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, len(projects_result)) self.assertEqual(self.projects[i]['id'], projects_result[0]['id']) def test_list_disabled(self): for i in range(len(self.users)): user = self.users[i] auth = self.auths[i] project = self.projects[i] # There are no disabled projects url = '/users/%s/projects?enabled=False' % user['id'] result = self.get(url, auth=auth) self.assertEqual(0, len(result.json['projects'])) # disable this one and check again project['enabled'] = False self.resource_api.update_project(project['id'], project) result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, 
len(projects_result)) self.assertEqual(self.projects[i]['id'], projects_result[0]['id']) def test_list_by_domain_id(self): for i in range(len(self.users)): user = self.users[i] domain = self.domains[i] auth = self.auths[i] # Try looking for projects with a non-existent domain_id url = '/users/%s/projects?domain_id=%s' % (user['id'], uuid.uuid4().hex) result = self.get(url, auth=auth) self.assertEqual(0, len(result.json['projects'])) # Now try a valid one url = '/users/%s/projects?domain_id=%s' % (user['id'], domain['id']) result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, len(projects_result)) self.assertEqual(self.projects[i]['id'], projects_result[0]['id']) keystone-9.0.0/keystone/tests/unit/test_v3_auth.py0000664000567000056710000063521412701407102023473 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import datetime import itertools import json import operator import uuid from keystoneclient.common import cms import mock from oslo_config import cfg from oslo_log import versionutils from oslo_utils import timeutils from six.moves import http_client from six.moves import range from testtools import matchers from testtools import testcase from keystone import auth from keystone.auth.plugins import totp from keystone.common import utils from keystone.contrib.revoke import routers from keystone import exception from keystone.policy.backends import rules from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit import test_v3 CONF = cfg.CONF class TestAuthInfo(common_auth.AuthTestMixin, testcase.TestCase): def setUp(self): super(TestAuthInfo, self).setUp() auth.controllers.load_auth_methods() def test_missing_auth_methods(self): auth_data = {'identity': {}} auth_data['identity']['token'] = {'id': uuid.uuid4().hex} self.assertRaises(exception.ValidationError, auth.controllers.AuthInfo.create, None, auth_data) def test_unsupported_auth_method(self): auth_data = {'methods': ['abc']} auth_data['abc'] = {'test': 'test'} auth_data = {'identity': auth_data} self.assertRaises(exception.AuthMethodNotSupported, auth.controllers.AuthInfo.create, None, auth_data) def test_missing_auth_method_data(self): auth_data = {'methods': ['password']} auth_data = {'identity': auth_data} self.assertRaises(exception.ValidationError, auth.controllers.AuthInfo.create, None, auth_data) def test_project_name_no_domain(self): auth_data = self.build_authentication_request( username='test', password='test', project_name='abc')['auth'] self.assertRaises(exception.ValidationError, auth.controllers.AuthInfo.create, None, auth_data) def test_both_project_and_domain_in_scope(self): auth_data = self.build_authentication_request( user_id='test', password='test', project_name='test', 
domain_name='test')['auth'] self.assertRaises(exception.ValidationError, auth.controllers.AuthInfo.create, None, auth_data) def test_get_method_names_duplicates(self): auth_data = self.build_authentication_request( token='test', user_id='test', password='test')['auth'] auth_data['identity']['methods'] = ['password', 'token', 'password', 'password'] context = None auth_info = auth.controllers.AuthInfo.create(context, auth_data) self.assertEqual(['password', 'token'], auth_info.get_method_names()) def test_get_method_data_invalid_method(self): auth_data = self.build_authentication_request( user_id='test', password='test')['auth'] context = None auth_info = auth.controllers.AuthInfo.create(context, auth_data) method_name = uuid.uuid4().hex self.assertRaises(exception.ValidationError, auth_info.get_method_data, method_name) class TokenAPITests(object): # Why is this not just setUp? Because TokenAPITests is not a test class # itself. If TokenAPITests became a subclass of the testcase, it would get # called by the enumerate-tests-in-file code. The way the functions get # resolved in Python for multiple inheritance means that a setUp in this # would get skipped by the testrunner. 
def doSetUp(self): r = self.v3_create_token(self.build_authentication_request( username=self.user['name'], user_domain_id=self.domain_id, password=self.user['password'])) self.v3_token_data = r.result self.v3_token = r.headers.get('X-Subject-Token') self.headers = {'X-Subject-Token': r.headers.get('X-Subject-Token')} def _make_auth_request(self, auth_data): resp = self.post('/auth/tokens', body=auth_data) token = resp.headers.get('X-Subject-Token') return token def _get_unscoped_token(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password']) return self._make_auth_request(auth_data) def _get_domain_scoped_token(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain_id) return self._make_auth_request(auth_data) def _get_project_scoped_token(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project_id) return self._make_auth_request(auth_data) def _get_trust_scoped_token(self, trustee_user, trust): auth_data = self.build_authentication_request( user_id=trustee_user['id'], password=trustee_user['password'], trust_id=trust['id']) return self._make_auth_request(auth_data) def _create_trust(self, impersonation=False): # Create a trustee user trustee_user = unit.create_user(self.identity_api, domain_id=self.domain_id) ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=trustee_user['id'], project_id=self.project_id, impersonation=impersonation, role_ids=[self.role_id]) # Create a trust r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) return (trustee_user, trust) def _validate_token(self, token, expected_status=http_client.OK): return self.get( '/auth/tokens', headers={'X-Subject-Token': token}, expected_status=expected_status) def _revoke_token(self, token, expected_status=http_client.NO_CONTENT): 
return self.delete( '/auth/tokens', headers={'x-subject-token': token}, expected_status=expected_status) def _set_user_enabled(self, user, enabled=True): user['enabled'] = enabled self.identity_api.update_user(user['id'], user) def test_validate_unscoped_token(self): unscoped_token = self._get_unscoped_token() self._validate_token(unscoped_token) def test_revoke_unscoped_token(self): unscoped_token = self._get_unscoped_token() self._validate_token(unscoped_token) self._revoke_token(unscoped_token) self._validate_token(unscoped_token, expected_status=http_client.NOT_FOUND) def test_unscoped_token_is_invalid_after_disabling_user(self): unscoped_token = self._get_unscoped_token() # Make sure the token is valid self._validate_token(unscoped_token) # Disable the user self._set_user_enabled(self.user, enabled=False) # Ensure validating a token for a disabled user fails self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, unscoped_token) def test_unscoped_token_is_invalid_after_enabling_disabled_user(self): unscoped_token = self._get_unscoped_token() # Make sure the token is valid self._validate_token(unscoped_token) # Disable the user self._set_user_enabled(self.user, enabled=False) # Ensure validating a token for a disabled user fails self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, unscoped_token) # Enable the user self._set_user_enabled(self.user) # Ensure validating a token for a re-enabled user fails self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, unscoped_token) def test_unscoped_token_is_invalid_after_disabling_user_domain(self): unscoped_token = self._get_unscoped_token() # Make sure the token is valid self._validate_token(unscoped_token) # Disable the user's domain self.domain['enabled'] = False self.resource_api.update_domain(self.domain['id'], self.domain) # Ensure validating a token for a disabled user fails self.assertRaises(exception.TokenNotFound, 
self.token_provider_api.validate_token, unscoped_token) def test_unscoped_token_is_invalid_after_changing_user_password(self): unscoped_token = self._get_unscoped_token() # Make sure the token is valid self._validate_token(unscoped_token) # Change user's password self.user['password'] = 'Password1' self.identity_api.update_user(self.user['id'], self.user) # Ensure updating user's password revokes existing user's tokens self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, unscoped_token) def test_validate_domain_scoped_token(self): # Grant user access to domain self.assignment_api.create_grant(self.role['id'], user_id=self.user['id'], domain_id=self.domain['id']) domain_scoped_token = self._get_domain_scoped_token() resp = self._validate_token(domain_scoped_token) resp_json = json.loads(resp.body) self.assertIsNotNone(resp_json['token']['catalog']) self.assertIsNotNone(resp_json['token']['roles']) self.assertIsNotNone(resp_json['token']['domain']) def test_domain_scoped_token_is_invalid_after_disabling_user(self): # Grant user access to domain self.assignment_api.create_grant(self.role['id'], user_id=self.user['id'], domain_id=self.domain['id']) domain_scoped_token = self._get_domain_scoped_token() # Make sure the token is valid self._validate_token(domain_scoped_token) # Disable user self._set_user_enabled(self.user, enabled=False) # Ensure validating a token for a disabled user fails self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, domain_scoped_token) def test_domain_scoped_token_is_invalid_after_deleting_grant(self): # Grant user access to domain self.assignment_api.create_grant(self.role['id'], user_id=self.user['id'], domain_id=self.domain['id']) domain_scoped_token = self._get_domain_scoped_token() # Make sure the token is valid self._validate_token(domain_scoped_token) # Delete access to domain self.assignment_api.delete_grant(self.role['id'], user_id=self.user['id'], 
domain_id=self.domain['id']) # Ensure validating a token for a disabled user fails self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, domain_scoped_token) def test_domain_scoped_token_invalid_after_disabling_domain(self): # Grant user access to domain self.assignment_api.create_grant(self.role['id'], user_id=self.user['id'], domain_id=self.domain['id']) domain_scoped_token = self._get_domain_scoped_token() # Make sure the token is valid self._validate_token(domain_scoped_token) # Disable domain self.domain['enabled'] = False self.resource_api.update_domain(self.domain['id'], self.domain) # Ensure validating a token for a disabled domain fails self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, domain_scoped_token) def test_v2_validate_domain_scoped_token_returns_unauthorized(self): # Test that validating a domain scoped token in v2.0 returns # unauthorized. # Grant user access to domain self.assignment_api.create_grant(self.role['id'], user_id=self.user['id'], domain_id=self.domain['id']) scoped_token = self._get_domain_scoped_token() self.assertRaises(exception.Unauthorized, self.token_provider_api.validate_v2_token, scoped_token) def test_validate_project_scoped_token(self): project_scoped_token = self._get_project_scoped_token() self._validate_token(project_scoped_token) def test_revoke_project_scoped_token(self): project_scoped_token = self._get_project_scoped_token() self._validate_token(project_scoped_token) self._revoke_token(project_scoped_token) self._validate_token(project_scoped_token, expected_status=http_client.NOT_FOUND) def test_project_scoped_token_is_invalid_after_disabling_user(self): project_scoped_token = self._get_project_scoped_token() # Make sure the token is valid self._validate_token(project_scoped_token) # Disable the user self._set_user_enabled(self.user, enabled=False) # Ensure validating a token for a disabled user fails self.assertRaises(exception.TokenNotFound, 
self.token_provider_api.validate_token, project_scoped_token) def test_project_scoped_token_invalid_after_changing_user_password(self): project_scoped_token = self._get_project_scoped_token() # Make sure the token is valid self._validate_token(project_scoped_token) # Update user's password self.user['password'] = 'Password1' self.identity_api.update_user(self.user['id'], self.user) # Ensure updating user's password revokes existing tokens self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, project_scoped_token) def test_project_scoped_token_invalid_after_disabling_project(self): project_scoped_token = self._get_project_scoped_token() # Make sure the token is valid self._validate_token(project_scoped_token) # Disable project self.project['enabled'] = False self.resource_api.update_project(self.project['id'], self.project) # Ensure validating a token for a disabled project fails self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, project_scoped_token) def test_rescope_unscoped_token_with_trust(self): trustee_user, trust = self._create_trust() self._get_trust_scoped_token(trustee_user, trust) def test_validate_a_trust_scoped_token(self): trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Validate a trust scoped token self._validate_token(trust_scoped_token) def test_validate_a_trust_scoped_token_impersonated(self): trustee_user, trust = self._create_trust(impersonation=True) trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Validate a trust scoped token self._validate_token(trust_scoped_token) def test_revoke_trust_scoped_token(self): trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Validate a trust scoped token self._validate_token(trust_scoped_token) self._revoke_token(trust_scoped_token) self._validate_token(trust_scoped_token, 
expected_status=http_client.NOT_FOUND) def test_trust_scoped_token_is_invalid_after_disabling_trustee(self): trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Validate a trust scoped token self._validate_token(trust_scoped_token) # Disable trustee trustee_update_ref = dict(enabled=False) self.identity_api.update_user(trustee_user['id'], trustee_update_ref) # Ensure validating a token for a disabled user fails self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, trust_scoped_token) def test_trust_scoped_token_invalid_after_changing_trustee_password(self): trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Validate a trust scoped token self._validate_token(trust_scoped_token) # Change trustee's password trustee_update_ref = dict(password='Password1') self.identity_api.update_user(trustee_user['id'], trustee_update_ref) # Ensure updating trustee's password revokes existing tokens self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, trust_scoped_token) def test_trust_scoped_token_is_invalid_after_disabling_trustor(self): trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Validate a trust scoped token self._validate_token(trust_scoped_token) # Disable the trustor trustor_update_ref = dict(enabled=False) self.identity_api.update_user(self.user['id'], trustor_update_ref) # Ensure validating a token for a disabled user fails self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, trust_scoped_token) def test_trust_scoped_token_invalid_after_changing_trustor_password(self): trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Validate a trust scoped token self._validate_token(trust_scoped_token) # Change trustor's password 
trustor_update_ref = dict(password='Password1') self.identity_api.update_user(self.user['id'], trustor_update_ref) # Ensure updating trustor's password revokes existing user's tokens self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, trust_scoped_token) def test_trust_scoped_token_invalid_after_disabled_trustor_domain(self): trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Validate a trust scoped token self._validate_token(trust_scoped_token) # Disable trustor's domain self.domain['enabled'] = False self.resource_api.update_domain(self.domain['id'], self.domain) trustor_update_ref = dict(password='Password1') self.identity_api.update_user(self.user['id'], trustor_update_ref) # Ensure updating trustor's password revokes existing user's tokens self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_token, trust_scoped_token) def test_v2_validate_trust_scoped_token(self): # Test that validating an trust scoped token in v2.0 returns # unauthorized. trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) self.assertRaises(exception.Unauthorized, self.token_provider_api.validate_v2_token, trust_scoped_token) def test_default_fixture_scope_token(self): self.assertIsNotNone(self.get_scoped_token()) def test_v3_v2_intermix_new_default_domain(self): # If the default_domain_id config option is changed, then should be # able to validate a v3 token with user in the new domain. # 1) Create a new domain for the user. new_domain = unit.new_domain_ref() self.resource_api.create_domain(new_domain['id'], new_domain) # 2) Create user in new domain. new_user = unit.create_user(self.identity_api, domain_id=new_domain['id']) # 3) Update the default_domain_id config option to the new domain self.config_fixture.config( group='identity', default_domain_id=new_domain['id']) # 4) Get a token using v3 API. 
v3_token = self.get_requested_token(self.build_authentication_request( user_id=new_user['id'], password=new_user['password'])) # 5) Validate token using v2 API. self.admin_request( path='/v2.0/tokens/%s' % v3_token, token=self.get_admin_token(), method='GET') def test_v3_v2_intermix_domain_scoped_token_failed(self): # grant the domain role to user self.put( path='/domains/%s/users/%s/roles/%s' % ( self.domain['id'], self.user['id'], self.role['id'])) # generate a domain-scoped v3 token v3_token = self.get_requested_token(self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain['id'])) # domain-scoped tokens are not supported by v2 self.admin_request( method='GET', path='/v2.0/tokens/%s' % v3_token, token=self.get_admin_token(), expected_status=http_client.UNAUTHORIZED) def test_v3_v2_intermix_non_default_project_succeed(self): # self.project is in a non-default domain v3_token = self.get_requested_token(self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], project_id=self.project['id'])) # v2 cannot reference projects outside the default domain self.admin_request( method='GET', path='/v2.0/tokens/%s' % v3_token, token=self.get_admin_token()) def test_v3_v2_intermix_non_default_user_succeed(self): self.assignment_api.create_grant( self.role['id'], user_id=self.user['id'], project_id=self.default_domain_project['id']) # self.user is in a non-default domain v3_token = self.get_requested_token(self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.default_domain_project['id'])) # v2 cannot reference projects outside the default domain self.admin_request( method='GET', path='/v2.0/tokens/%s' % v3_token, token=self.get_admin_token()) def test_v3_v2_intermix_domain_scope_failed(self): self.assignment_api.create_grant( self.role['id'], user_id=self.default_domain_user['id'], 
domain_id=self.domain['id']) v3_token = self.get_requested_token(self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], domain_id=self.domain['id'])) # v2 cannot reference projects outside the default domain self.admin_request( path='/v2.0/tokens/%s' % v3_token, token=self.get_admin_token(), method='GET', expected_status=http_client.UNAUTHORIZED) def test_v3_v2_unscoped_token_intermix(self): r = self.v3_create_token(self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'])) self.assertValidUnscopedTokenResponse(r) v3_token_data = r.result v3_token = r.headers.get('X-Subject-Token') # now validate the v3 token with v2 API r = self.admin_request( path='/v2.0/tokens/%s' % v3_token, token=self.get_admin_token(), method='GET') v2_token_data = r.result self.assertEqual(v2_token_data['access']['user']['id'], v3_token_data['token']['user']['id']) # v2 token time has not fraction of second precision so # just need to make sure the non fraction part agrees self.assertIn(v2_token_data['access']['token']['expires'][:-1], v3_token_data['token']['expires_at']) def test_v3_v2_token_intermix(self): # FIXME(gyee): PKI tokens are not interchangeable because token # data is baked into the token itself. 
r = self.v3_create_token(self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], project_id=self.default_domain_project['id'])) self.assertValidProjectScopedTokenResponse(r) v3_token_data = r.result v3_token = r.headers.get('X-Subject-Token') # now validate the v3 token with v2 API r = self.admin_request( method='GET', path='/v2.0/tokens/%s' % v3_token, token=self.get_admin_token()) v2_token_data = r.result self.assertEqual(v2_token_data['access']['user']['id'], v3_token_data['token']['user']['id']) # v2 token time has not fraction of second precision so # just need to make sure the non fraction part agrees self.assertIn(v2_token_data['access']['token']['expires'][:-1], v3_token_data['token']['expires_at']) self.assertEqual(v2_token_data['access']['user']['roles'][0]['name'], v3_token_data['token']['roles'][0]['name']) def test_v2_v3_unscoped_token_intermix(self): r = self.admin_request( method='POST', path='/v2.0/tokens', body={ 'auth': { 'passwordCredentials': { 'userId': self.default_domain_user['id'], 'password': self.default_domain_user['password'] } } }) v2_token_data = r.result v2_token = v2_token_data['access']['token']['id'] r = self.get('/auth/tokens', headers={'X-Subject-Token': v2_token}) self.assertValidUnscopedTokenResponse(r) v3_token_data = r.result self.assertEqual(v2_token_data['access']['user']['id'], v3_token_data['token']['user']['id']) # v2 token time has not fraction of second precision so # just need to make sure the non fraction part agrees self.assertIn(v2_token_data['access']['token']['expires'][-1], v3_token_data['token']['expires_at']) def test_v2_v3_token_intermix(self): r = self.admin_request( path='/v2.0/tokens', method='POST', body={ 'auth': { 'passwordCredentials': { 'userId': self.default_domain_user['id'], 'password': self.default_domain_user['password'] }, 'tenantId': self.default_domain_project['id'] } }) v2_token_data = r.result v2_token = 
v2_token_data['access']['token']['id'] r = self.get('/auth/tokens', headers={'X-Subject-Token': v2_token}) self.assertValidProjectScopedTokenResponse(r) v3_token_data = r.result self.assertEqual(v2_token_data['access']['user']['id'], v3_token_data['token']['user']['id']) # v2 token time has not fraction of second precision so # just need to make sure the non fraction part agrees self.assertIn(v2_token_data['access']['token']['expires'][-1], v3_token_data['token']['expires_at']) self.assertEqual(v2_token_data['access']['user']['roles'][0]['name'], v3_token_data['token']['roles'][0]['name']) v2_issued_at = timeutils.parse_isotime( v2_token_data['access']['token']['issued_at']) v3_issued_at = timeutils.parse_isotime( v3_token_data['token']['issued_at']) self.assertEqual(v2_issued_at, v3_issued_at) def test_v2_token_deleted_on_v3(self): # Create a v2 token. body = { 'auth': { 'passwordCredentials': { 'userId': self.default_domain_user['id'], 'password': self.default_domain_user['password'] }, 'tenantId': self.default_domain_project['id'] } } r = self.admin_request( path='/v2.0/tokens', method='POST', body=body) v2_token = r.result['access']['token']['id'] # Delete the v2 token using v3. self.delete( '/auth/tokens', headers={'X-Subject-Token': v2_token}) # Attempting to use the deleted token on v2 should fail. 
self.admin_request( path='/v2.0/tenants', method='GET', token=v2_token, expected_status=http_client.UNAUTHORIZED) def test_rescoping_token(self): expires = self.v3_token_data['token']['expires_at'] # rescope the token r = self.v3_create_token(self.build_authentication_request( token=self.v3_token, project_id=self.project_id)) self.assertValidProjectScopedTokenResponse(r) # ensure token expiration stayed the same self.assertEqual(expires, r.result['token']['expires_at']) def test_check_token(self): self.head('/auth/tokens', headers=self.headers, expected_status=http_client.OK) def test_validate_token(self): r = self.get('/auth/tokens', headers=self.headers) self.assertValidUnscopedTokenResponse(r) def test_validate_missing_subject_token(self): self.get('/auth/tokens', expected_status=http_client.NOT_FOUND) def test_validate_missing_auth_token(self): self.admin_request( method='GET', path='/v3/projects', token=None, expected_status=http_client.UNAUTHORIZED) def test_validate_token_nocatalog(self): v3_token = self.get_requested_token(self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])) r = self.get( '/auth/tokens?nocatalog', headers={'X-Subject-Token': v3_token}) self.assertValidProjectScopedTokenResponse(r, require_catalog=False) def test_is_admin_token_by_ids(self): self.config_fixture.config( group='resource', admin_project_domain_name=self.domain['name'], admin_project_name=self.project['name']) r = self.v3_create_token(self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])) self.assertValidProjectScopedTokenResponse(r, is_admin_project=True) v3_token = r.headers.get('X-Subject-Token') r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token}) self.assertValidProjectScopedTokenResponse(r, is_admin_project=True) def test_is_admin_token_by_names(self): self.config_fixture.config( group='resource', 
admin_project_domain_name=self.domain['name'], admin_project_name=self.project['name']) r = self.v3_create_token(self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_domain_name=self.domain['name'], project_name=self.project['name'])) self.assertValidProjectScopedTokenResponse(r, is_admin_project=True) v3_token = r.headers.get('X-Subject-Token') r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token}) self.assertValidProjectScopedTokenResponse(r, is_admin_project=True) def test_token_for_non_admin_project_is_not_admin(self): self.config_fixture.config( group='resource', admin_project_domain_name=self.domain['name'], admin_project_name=uuid.uuid4().hex) r = self.v3_create_token(self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])) self.assertValidProjectScopedTokenResponse(r, is_admin_project=False) v3_token = r.headers.get('X-Subject-Token') r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token}) self.assertValidProjectScopedTokenResponse(r, is_admin_project=False) def test_token_for_non_admin_domain_same_project_name_is_not_admin(self): self.config_fixture.config( group='resource', admin_project_domain_name=uuid.uuid4().hex, admin_project_name=self.project['name']) r = self.v3_create_token(self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])) self.assertValidProjectScopedTokenResponse(r, is_admin_project=False) v3_token = r.headers.get('X-Subject-Token') r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token}) self.assertValidProjectScopedTokenResponse(r, is_admin_project=False) def test_only_admin_project_set_acts_as_non_admin(self): self.config_fixture.config( group='resource', admin_project_name=self.project['name']) r = self.v3_create_token(self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], 
        project_id=self.project['id']))
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)
        v3_token = r.headers.get('X-Subject-Token')
        r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)

    def _create_role(self, domain_id=None):
        """Call ``POST /roles``.

        Creates a (domain-specific, when ``domain_id`` is given) role via
        the API and returns the validated role reference.
        """
        ref = unit.new_role_ref(domain_id=domain_id)
        r = self.post('/roles', body={'role': ref})
        return self.assertValidRoleResponse(r, ref)

    def _create_implied_role(self, prior_id):
        # Create a fresh role and register it as implied by ``prior_id``
        # via PUT /roles/{prior}/implies/{implied}; returns the new role.
        implied = self._create_role()
        url = '/roles/%s/implies/%s' % (prior_id, implied['id'])
        self.put(url, expected_status=http_client.CREATED)
        return implied

    def _delete_implied_role(self, prior_role_id, implied_role_id):
        # Remove the implication rule between the two roles.
        url = '/roles/%s/implies/%s' % (prior_role_id, implied_role_id)
        self.delete(url)

    def _get_scoped_token_roles(self, is_domain=False):
        # Issue a project-scoped (or, when ``is_domain``, domain-scoped)
        # token, validate it via GET /auth/tokens, and return the list of
        # roles embedded in the validated token body.
        if is_domain:
            v3_token = self.get_domain_scoped_token()
        else:
            v3_token = self.get_scoped_token()

        r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
        v3_token_data = r.result
        token_roles = v3_token_data['token']['roles']
        return token_roles

    def _create_implied_role_shows_in_v3_token(self, is_domain):
        # Starting from a scope that carries exactly one role, add two
        # roles implied by it and verify each newly issued token grows to
        # include the implied roles alongside the prior one.
        token_roles = self._get_scoped_token_roles(is_domain)
        self.assertEqual(1, len(token_roles))

        prior = token_roles[0]['id']
        implied1 = self._create_implied_role(prior)

        token_roles = self._get_scoped_token_roles(is_domain)
        self.assertEqual(2, len(token_roles))

        implied2 = self._create_implied_role(prior)
        token_roles = self._get_scoped_token_roles(is_domain)
        self.assertEqual(3, len(token_roles))

        token_role_ids = [role['id'] for role in token_roles]
        self.assertIn(prior, token_role_ids)
        self.assertIn(implied1['id'], token_role_ids)
        self.assertIn(implied2['id'], token_role_ids)

    def test_create_implied_role_shows_in_v3_project_token(self):
        # regardless of the default chosen, this should always
        # test with the option set.
self.config_fixture.config(group='token', infer_roles=True) self._create_implied_role_shows_in_v3_token(False) def test_create_implied_role_shows_in_v3_domain_token(self): self.config_fixture.config(group='token', infer_roles=True) self.assignment_api.create_grant(self.role['id'], user_id=self.user['id'], domain_id=self.domain['id']) self._create_implied_role_shows_in_v3_token(True) def test_group_assigned_implied_role_shows_in_v3_token(self): self.config_fixture.config(group='token', infer_roles=True) is_domain = False token_roles = self._get_scoped_token_roles(is_domain) self.assertEqual(1, len(token_roles)) new_role = self._create_role() prior = new_role['id'] new_group_ref = unit.new_group_ref(domain_id=self.domain['id']) new_group = self.identity_api.create_group(new_group_ref) self.assignment_api.create_grant(prior, group_id=new_group['id'], project_id=self.project['id']) token_roles = self._get_scoped_token_roles(is_domain) self.assertEqual(1, len(token_roles)) self.identity_api.add_user_to_group(self.user['id'], new_group['id']) token_roles = self._get_scoped_token_roles(is_domain) self.assertEqual(2, len(token_roles)) implied1 = self._create_implied_role(prior) token_roles = self._get_scoped_token_roles(is_domain) self.assertEqual(3, len(token_roles)) implied2 = self._create_implied_role(prior) token_roles = self._get_scoped_token_roles(is_domain) self.assertEqual(4, len(token_roles)) token_role_ids = [role['id'] for role in token_roles] self.assertIn(prior, token_role_ids) self.assertIn(implied1['id'], token_role_ids) self.assertIn(implied2['id'], token_role_ids) def test_multiple_implied_roles_show_in_v3_token(self): self.config_fixture.config(group='token', infer_roles=True) token_roles = self._get_scoped_token_roles() self.assertEqual(1, len(token_roles)) prior = token_roles[0]['id'] implied1 = self._create_implied_role(prior) implied2 = self._create_implied_role(prior) implied3 = self._create_implied_role(prior) token_roles = 
self._get_scoped_token_roles() self.assertEqual(4, len(token_roles)) token_role_ids = [role['id'] for role in token_roles] self.assertIn(prior, token_role_ids) self.assertIn(implied1['id'], token_role_ids) self.assertIn(implied2['id'], token_role_ids) self.assertIn(implied3['id'], token_role_ids) def test_chained_implied_role_shows_in_v3_token(self): self.config_fixture.config(group='token', infer_roles=True) token_roles = self._get_scoped_token_roles() self.assertEqual(1, len(token_roles)) prior = token_roles[0]['id'] implied1 = self._create_implied_role(prior) implied2 = self._create_implied_role(implied1['id']) implied3 = self._create_implied_role(implied2['id']) token_roles = self._get_scoped_token_roles() self.assertEqual(4, len(token_roles)) token_role_ids = [role['id'] for role in token_roles] self.assertIn(prior, token_role_ids) self.assertIn(implied1['id'], token_role_ids) self.assertIn(implied2['id'], token_role_ids) self.assertIn(implied3['id'], token_role_ids) def test_implied_role_disabled_by_config(self): self.config_fixture.config(group='token', infer_roles=False) token_roles = self._get_scoped_token_roles() self.assertEqual(1, len(token_roles)) prior = token_roles[0]['id'] implied1 = self._create_implied_role(prior) implied2 = self._create_implied_role(implied1['id']) self._create_implied_role(implied2['id']) token_roles = self._get_scoped_token_roles() self.assertEqual(1, len(token_roles)) token_role_ids = [role['id'] for role in token_roles] self.assertIn(prior, token_role_ids) def test_delete_implied_role_do_not_show_in_v3_token(self): self.config_fixture.config(group='token', infer_roles=True) token_roles = self._get_scoped_token_roles() prior = token_roles[0]['id'] implied = self._create_implied_role(prior) token_roles = self._get_scoped_token_roles() self.assertEqual(2, len(token_roles)) self._delete_implied_role(prior, implied['id']) token_roles = self._get_scoped_token_roles() self.assertEqual(1, len(token_roles)) def 
test_unrelated_implied_roles_do_not_change_v3_token(self): self.config_fixture.config(group='token', infer_roles=True) token_roles = self._get_scoped_token_roles() prior = token_roles[0]['id'] implied = self._create_implied_role(prior) token_roles = self._get_scoped_token_roles() self.assertEqual(2, len(token_roles)) unrelated = self._create_role() url = '/roles/%s/implies/%s' % (unrelated['id'], implied['id']) self.put(url, expected_status=http_client.CREATED) token_roles = self._get_scoped_token_roles() self.assertEqual(2, len(token_roles)) self._delete_implied_role(unrelated['id'], implied['id']) token_roles = self._get_scoped_token_roles() self.assertEqual(2, len(token_roles)) def test_domain_scpecific_roles_do_not_show_v3_token(self): self.config_fixture.config(group='token', infer_roles=True) initial_token_roles = self._get_scoped_token_roles() new_role = self._create_role(domain_id=self.domain_id) self.assignment_api.create_grant(new_role['id'], user_id=self.user['id'], project_id=self.project['id']) implied = self._create_implied_role(new_role['id']) token_roles = self._get_scoped_token_roles() self.assertEqual(len(initial_token_roles) + 1, len(token_roles)) # The implied role from the domain specific role should be in the # token, but not the domain specific role itself. 
token_role_ids = [role['id'] for role in token_roles] self.assertIn(implied['id'], token_role_ids) self.assertNotIn(new_role['id'], token_role_ids) def test_remove_all_roles_from_scope_result_in_404(self): # create a new user new_user = unit.create_user(self.identity_api, domain_id=self.domain['id']) # give the new user a role on a project path = '/projects/%s/users/%s/roles/%s' % ( self.project['id'], new_user['id'], self.role['id']) self.put(path=path) # authenticate as the new user and get a project-scoped token auth_data = self.build_authentication_request( user_id=new_user['id'], password=new_user['password'], project_id=self.project['id']) subject_token_id = self.v3_create_token(auth_data).headers.get( 'X-Subject-Token') # make sure the project-scoped token is valid headers = {'X-Subject-Token': subject_token_id} r = self.get('/auth/tokens', headers=headers) self.assertValidProjectScopedTokenResponse(r) # remove the roles from the user for the given scope path = '/projects/%s/users/%s/roles/%s' % ( self.project['id'], new_user['id'], self.role['id']) self.delete(path=path) # token validation should now result in 404 self.get('/auth/tokens', headers=headers, expected_status=http_client.NOT_FOUND) class TokenDataTests(object): """Test the data in specific token types.""" def test_unscoped_token_format(self): # ensure the unscoped token response contains the appropriate data r = self.get('/auth/tokens', headers=self.headers) self.assertValidUnscopedTokenResponse(r) def test_domain_scoped_token_format(self): # ensure the domain scoped token response contains the appropriate data self.assignment_api.create_grant( self.role['id'], user_id=self.default_domain_user['id'], domain_id=self.domain['id']) domain_scoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], domain_id=self.domain['id']) ) self.headers['X-Subject-Token'] = domain_scoped_token r = 
self.get('/auth/tokens', headers=self.headers) self.assertValidDomainScopedTokenResponse(r) def test_project_scoped_token_format(self): # ensure project scoped token responses contains the appropriate data project_scoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], project_id=self.default_domain_project['id']) ) self.headers['X-Subject-Token'] = project_scoped_token r = self.get('/auth/tokens', headers=self.headers) self.assertValidProjectScopedTokenResponse(r) def test_extra_data_in_unscoped_token_fails_validation(self): # ensure unscoped token response contains the appropriate data r = self.get('/auth/tokens', headers=self.headers) # populate the response result with some extra data r.result['token'][u'extra'] = unicode(uuid.uuid4().hex) self.assertRaises(exception.SchemaValidationError, self.assertValidUnscopedTokenResponse, r) def test_extra_data_in_domain_scoped_token_fails_validation(self): # ensure domain scoped token response contains the appropriate data self.assignment_api.create_grant( self.role['id'], user_id=self.default_domain_user['id'], domain_id=self.domain['id']) domain_scoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], domain_id=self.domain['id']) ) self.headers['X-Subject-Token'] = domain_scoped_token r = self.get('/auth/tokens', headers=self.headers) # populate the response result with some extra data r.result['token'][u'extra'] = unicode(uuid.uuid4().hex) self.assertRaises(exception.SchemaValidationError, self.assertValidDomainScopedTokenResponse, r) def test_extra_data_in_project_scoped_token_fails_validation(self): # ensure project scoped token responses contains the appropriate data project_scoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.default_domain_user['id'], 
password=self.default_domain_user['password'], project_id=self.default_domain_project['id']) ) self.headers['X-Subject-Token'] = project_scoped_token resp = self.get('/auth/tokens', headers=self.headers) # populate the response result with some extra data resp.result['token'][u'extra'] = unicode(uuid.uuid4().hex) self.assertRaises(exception.SchemaValidationError, self.assertValidProjectScopedTokenResponse, resp) class AllowRescopeScopedTokenDisabledTests(test_v3.RestfulTestCase): def config_overrides(self): super(AllowRescopeScopedTokenDisabledTests, self).config_overrides() self.config_fixture.config( group='token', allow_rescope_scoped_token=False) def test_rescoping_v3_to_v3_disabled(self): self.v3_create_token( self.build_authentication_request( token=self.get_scoped_token(), project_id=self.project_id), expected_status=http_client.FORBIDDEN) def _v2_token(self): body = { 'auth': { "tenantId": self.default_domain_project['id'], 'passwordCredentials': { 'userId': self.default_domain_user['id'], 'password': self.default_domain_user['password'] } }} resp = self.admin_request(path='/v2.0/tokens', method='POST', body=body) v2_token_data = resp.result return v2_token_data def _v2_token_from_token(self, token): body = { 'auth': { "tenantId": self.project['id'], "token": token }} self.admin_request(path='/v2.0/tokens', method='POST', body=body, expected_status=http_client.FORBIDDEN) def test_rescoping_v2_to_v3_disabled(self): token = self._v2_token() self.v3_create_token( self.build_authentication_request( token=token['access']['token']['id'], project_id=self.project_id), expected_status=http_client.FORBIDDEN) def test_rescoping_v3_to_v2_disabled(self): token = {'id': self.get_scoped_token()} self._v2_token_from_token(token) def test_rescoping_v2_to_v2_disabled(self): token = self._v2_token() self._v2_token_from_token(token['access']['token']) def test_rescoped_domain_token_disabled(self): self.domainA = unit.new_domain_ref() 
self.resource_api.create_domain(self.domainA['id'], self.domainA) self.assignment_api.create_grant(self.role['id'], user_id=self.user['id'], domain_id=self.domainA['id']) unscoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.user['id'], password=self.user['password'])) # Get a domain-scoped token from the unscoped token domain_scoped_token = self.get_requested_token( self.build_authentication_request( token=unscoped_token, domain_id=self.domainA['id'])) self.v3_create_token( self.build_authentication_request( token=domain_scoped_token, project_id=self.project_id), expected_status=http_client.FORBIDDEN) class TestPKITokenAPIs(test_v3.RestfulTestCase, TokenAPITests, TokenDataTests): def config_overrides(self): super(TestPKITokenAPIs, self).config_overrides() self.config_fixture.config(group='token', provider='pki') def setUp(self): super(TestPKITokenAPIs, self).setUp() self.doSetUp() def verify_token(self, *args, **kwargs): return cms.verify_token(*args, **kwargs) def test_v3_token_id(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password']) resp = self.v3_create_token(auth_data) token_data = resp.result token_id = resp.headers.get('X-Subject-Token') self.assertIn('expires_at', token_data['token']) decoded_token = self.verify_token(token_id, CONF.signing.certfile, CONF.signing.ca_certs) decoded_token_dict = json.loads(decoded_token) token_resp_dict = json.loads(resp.body) self.assertEqual(decoded_token_dict, token_resp_dict) # should be able to validate hash PKI token as well hash_token_id = cms.cms_hash_token(token_id) headers = {'X-Subject-Token': hash_token_id} resp = self.get('/auth/tokens', headers=headers) expected_token_data = resp.result self.assertDictEqual(expected_token_data, token_data) def test_v3_v2_hashed_pki_token_intermix(self): auth_data = self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], 
            project_id=self.default_domain_project['id'])
        resp = self.v3_create_token(auth_data)
        token_data = resp.result
        token = resp.headers.get('X-Subject-Token')

        # should be able to validate a hash PKI token in v2 too
        token = cms.cms_hash_token(token)
        path = '/v2.0/tokens/%s' % (token)
        resp = self.admin_request(path=path,
                                  token=self.get_admin_token(),
                                  method='GET')
        v2_token = resp.result
        self.assertEqual(v2_token['access']['user']['id'],
                         token_data['token']['user']['id'])
        # v2 token time has not fraction of second precision so
        # just need to make sure the non fraction part agrees
        self.assertIn(v2_token['access']['token']['expires'][:-1],
                      token_data['token']['expires_at'])
        self.assertEqual(v2_token['access']['user']['roles'][0]['name'],
                         token_data['token']['roles'][0]['name'])


class TestPKIZTokenAPIs(TestPKITokenAPIs):
    # Runs the full PKI test suite against the compressed 'pkiz'
    # provider; only the provider name and the CMS verify helper differ.
    def config_overrides(self):
        super(TestPKIZTokenAPIs, self).config_overrides()
        self.config_fixture.config(group='token', provider='pkiz')

    def verify_token(self, *args, **kwargs):
        # pkiz tokens are checked with the pkiz-specific verifier rather
        # than cms.verify_token.
        return cms.pkiz_verify(*args, **kwargs)


class TestUUIDTokenAPIs(test_v3.RestfulTestCase, TokenAPITests,
                        TokenDataTests):
    # Runs the shared token API/data tests against the 'uuid' provider.
    def config_overrides(self):
        super(TestUUIDTokenAPIs, self).config_overrides()
        self.config_fixture.config(group='token', provider='uuid')

    def setUp(self):
        super(TestUUIDTokenAPIs, self).setUp()
        self.doSetUp()

    def test_v3_token_id(self):
        # A uuid token carries an expiry in the body and, unlike PKI
        # variants, must not be an ASN.1/CMS-encoded blob.
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'])
        resp = self.v3_create_token(auth_data)
        token_data = resp.result
        token_id = resp.headers.get('X-Subject-Token')
        self.assertIn('expires_at', token_data['token'])
        self.assertFalse(cms.is_asn1_token(token_id))


class TestFernetTokenAPIs(test_v3.RestfulTestCase, TokenAPITests,
                          TokenDataTests):
    # Runs the shared token API/data tests against the 'fernet' provider;
    # requires a key repository fixture for encryption keys.
    def config_overrides(self):
        super(TestFernetTokenAPIs, self).config_overrides()
        self.config_fixture.config(group='token', provider='fernet')
        self.useFixture(ksfixtures.KeyRepository(self.config_fixture))

    def setUp(self):
        super(TestFernetTokenAPIs,
self).setUp() self.doSetUp() def _make_auth_request(self, auth_data): token = super(TestFernetTokenAPIs, self)._make_auth_request(auth_data) self.assertLess(len(token), 255) return token def test_validate_tampered_unscoped_token_fails(self): unscoped_token = self._get_unscoped_token() tampered_token = (unscoped_token[:50] + uuid.uuid4().hex + unscoped_token[50 + 32:]) self._validate_token(tampered_token, expected_status=http_client.NOT_FOUND) def test_validate_tampered_project_scoped_token_fails(self): project_scoped_token = self._get_project_scoped_token() tampered_token = (project_scoped_token[:50] + uuid.uuid4().hex + project_scoped_token[50 + 32:]) self._validate_token(tampered_token, expected_status=http_client.NOT_FOUND) def test_validate_tampered_trust_scoped_token_fails(self): trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Get a trust scoped token tampered_token = (trust_scoped_token[:50] + uuid.uuid4().hex + trust_scoped_token[50 + 32:]) self._validate_token(tampered_token, expected_status=http_client.NOT_FOUND) class TestTokenRevokeSelfAndAdmin(test_v3.RestfulTestCase): """Test token revoke using v3 Identity API by token owner and admin.""" def load_sample_data(self): """Load Sample Data for Test Cases. 
Two domains, domainA and domainB Two users in domainA, userNormalA and userAdminA One user in domainB, userAdminB """ super(TestTokenRevokeSelfAndAdmin, self).load_sample_data() # DomainA setup self.domainA = unit.new_domain_ref() self.resource_api.create_domain(self.domainA['id'], self.domainA) self.userAdminA = unit.create_user(self.identity_api, domain_id=self.domainA['id']) self.userNormalA = unit.create_user(self.identity_api, domain_id=self.domainA['id']) self.assignment_api.create_grant(self.role['id'], user_id=self.userAdminA['id'], domain_id=self.domainA['id']) def _policy_fixture(self): return ksfixtures.Policy(unit.dirs.etc('policy.v3cloudsample.json'), self.config_fixture) def test_user_revokes_own_token(self): user_token = self.get_requested_token( self.build_authentication_request( user_id=self.userNormalA['id'], password=self.userNormalA['password'], user_domain_id=self.domainA['id'])) self.assertNotEmpty(user_token) headers = {'X-Subject-Token': user_token} adminA_token = self.get_requested_token( self.build_authentication_request( user_id=self.userAdminA['id'], password=self.userAdminA['password'], domain_name=self.domainA['name'])) self.head('/auth/tokens', headers=headers, expected_status=http_client.OK, token=adminA_token) self.head('/auth/tokens', headers=headers, expected_status=http_client.OK, token=user_token) self.delete('/auth/tokens', headers=headers, token=user_token) # invalid X-Auth-Token and invalid X-Subject-Token self.head('/auth/tokens', headers=headers, expected_status=http_client.UNAUTHORIZED, token=user_token) # invalid X-Auth-Token and invalid X-Subject-Token self.delete('/auth/tokens', headers=headers, expected_status=http_client.UNAUTHORIZED, token=user_token) # valid X-Auth-Token and invalid X-Subject-Token self.delete('/auth/tokens', headers=headers, expected_status=http_client.NOT_FOUND, token=adminA_token) # valid X-Auth-Token and invalid X-Subject-Token self.head('/auth/tokens', headers=headers, 
expected_status=http_client.NOT_FOUND, token=adminA_token) def test_adminA_revokes_userA_token(self): user_token = self.get_requested_token( self.build_authentication_request( user_id=self.userNormalA['id'], password=self.userNormalA['password'], user_domain_id=self.domainA['id'])) self.assertNotEmpty(user_token) headers = {'X-Subject-Token': user_token} adminA_token = self.get_requested_token( self.build_authentication_request( user_id=self.userAdminA['id'], password=self.userAdminA['password'], domain_name=self.domainA['name'])) self.head('/auth/tokens', headers=headers, expected_status=http_client.OK, token=adminA_token) self.head('/auth/tokens', headers=headers, expected_status=http_client.OK, token=user_token) self.delete('/auth/tokens', headers=headers, token=adminA_token) # invalid X-Auth-Token and invalid X-Subject-Token self.head('/auth/tokens', headers=headers, expected_status=http_client.UNAUTHORIZED, token=user_token) # valid X-Auth-Token and invalid X-Subject-Token self.delete('/auth/tokens', headers=headers, expected_status=http_client.NOT_FOUND, token=adminA_token) # valid X-Auth-Token and invalid X-Subject-Token self.head('/auth/tokens', headers=headers, expected_status=http_client.NOT_FOUND, token=adminA_token) def test_adminB_fails_revoking_userA_token(self): # DomainB setup self.domainB = unit.new_domain_ref() self.resource_api.create_domain(self.domainB['id'], self.domainB) userAdminB = unit.create_user(self.identity_api, domain_id=self.domainB['id']) self.assignment_api.create_grant(self.role['id'], user_id=userAdminB['id'], domain_id=self.domainB['id']) user_token = self.get_requested_token( self.build_authentication_request( user_id=self.userNormalA['id'], password=self.userNormalA['password'], user_domain_id=self.domainA['id'])) headers = {'X-Subject-Token': user_token} adminB_token = self.get_requested_token( self.build_authentication_request( user_id=userAdminB['id'], password=userAdminB['password'], domain_name=self.domainB['name'])) 
self.head('/auth/tokens', headers=headers, expected_status=http_client.FORBIDDEN, token=adminB_token) self.delete('/auth/tokens', headers=headers, expected_status=http_client.FORBIDDEN, token=adminB_token) class TestTokenRevokeById(test_v3.RestfulTestCase): """Test token revocation on the v3 Identity API.""" def config_overrides(self): super(TestTokenRevokeById, self).config_overrides() self.config_fixture.config( group='token', provider='pki', revoke_by_id=False) def setUp(self): """Setup for Token Revoking Test Cases. As well as the usual housekeeping, create a set of domains, users, groups, roles and projects for the subsequent tests: - Two domains: A & B - Three users (1, 2 and 3) - Three groups (1, 2 and 3) - Two roles (1 and 2) - DomainA owns user1, domainB owns user2 and user3 - DomainA owns group1 and group2, domainB owns group3 - User1 and user2 are members of group1 - User3 is a member of group2 - Two projects: A & B, both in domainA - Group1 has role1 on Project A and B, meaning that user1 and user2 will get these roles by virtue of membership - User1, 2 and 3 have role1 assigned to projectA - Group1 has role1 on Project A and B, meaning that user1 and user2 will get role1 (duplicated) by virtue of membership - User1 has role2 assigned to domainA """ super(TestTokenRevokeById, self).setUp() # Start by creating a couple of domains and projects self.domainA = unit.new_domain_ref() self.resource_api.create_domain(self.domainA['id'], self.domainA) self.domainB = unit.new_domain_ref() self.resource_api.create_domain(self.domainB['id'], self.domainB) self.projectA = unit.new_project_ref(domain_id=self.domainA['id']) self.resource_api.create_project(self.projectA['id'], self.projectA) self.projectB = unit.new_project_ref(domain_id=self.domainA['id']) self.resource_api.create_project(self.projectB['id'], self.projectB) # Now create some users self.user1 = unit.create_user(self.identity_api, domain_id=self.domainA['id']) self.user2 = 
unit.create_user(self.identity_api, domain_id=self.domainB['id']) self.user3 = unit.create_user(self.identity_api, domain_id=self.domainB['id']) self.group1 = unit.new_group_ref(domain_id=self.domainA['id']) self.group1 = self.identity_api.create_group(self.group1) self.group2 = unit.new_group_ref(domain_id=self.domainA['id']) self.group2 = self.identity_api.create_group(self.group2) self.group3 = unit.new_group_ref(domain_id=self.domainB['id']) self.group3 = self.identity_api.create_group(self.group3) self.identity_api.add_user_to_group(self.user1['id'], self.group1['id']) self.identity_api.add_user_to_group(self.user2['id'], self.group1['id']) self.identity_api.add_user_to_group(self.user3['id'], self.group2['id']) self.role1 = unit.new_role_ref() self.role_api.create_role(self.role1['id'], self.role1) self.role2 = unit.new_role_ref() self.role_api.create_role(self.role2['id'], self.role2) self.assignment_api.create_grant(self.role2['id'], user_id=self.user1['id'], domain_id=self.domainA['id']) self.assignment_api.create_grant(self.role1['id'], user_id=self.user1['id'], project_id=self.projectA['id']) self.assignment_api.create_grant(self.role1['id'], user_id=self.user2['id'], project_id=self.projectA['id']) self.assignment_api.create_grant(self.role1['id'], user_id=self.user3['id'], project_id=self.projectA['id']) self.assignment_api.create_grant(self.role1['id'], group_id=self.group1['id'], project_id=self.projectA['id']) def test_unscoped_token_remains_valid_after_role_assignment(self): unscoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'])) scoped_token = self.get_requested_token( self.build_authentication_request( token=unscoped_token, project_id=self.projectA['id'])) # confirm both tokens are valid self.head('/auth/tokens', headers={'X-Subject-Token': unscoped_token}, expected_status=http_client.OK) self.head('/auth/tokens', headers={'X-Subject-Token': scoped_token}, 
expected_status=http_client.OK) # create a new role role = unit.new_role_ref() self.role_api.create_role(role['id'], role) # assign a new role self.put( '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % { 'project_id': self.projectA['id'], 'user_id': self.user1['id'], 'role_id': role['id']}) # both tokens should remain valid self.head('/auth/tokens', headers={'X-Subject-Token': unscoped_token}, expected_status=http_client.OK) self.head('/auth/tokens', headers={'X-Subject-Token': scoped_token}, expected_status=http_client.OK) def test_deleting_user_grant_revokes_token(self): """Test deleting a user grant revokes token. Test Plan: - Get a token for user1, scoped to ProjectA - Delete the grant user1 has on ProjectA - Check token is no longer valid """ auth_data = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], project_id=self.projectA['id']) token = self.get_requested_token(auth_data) # Confirm token is valid self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.OK) # Delete the grant, which should invalidate the token grant_url = ( '/projects/%(project_id)s/users/%(user_id)s/' 'roles/%(role_id)s' % { 'project_id': self.projectA['id'], 'user_id': self.user1['id'], 'role_id': self.role1['id']}) self.delete(grant_url) self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.NOT_FOUND) def role_data_fixtures(self): self.projectC = unit.new_project_ref(domain_id=self.domainA['id']) self.resource_api.create_project(self.projectC['id'], self.projectC) self.user4 = unit.create_user(self.identity_api, domain_id=self.domainB['id']) self.user5 = unit.create_user(self.identity_api, domain_id=self.domainA['id']) self.user6 = unit.create_user(self.identity_api, domain_id=self.domainA['id']) self.identity_api.add_user_to_group(self.user5['id'], self.group1['id']) self.assignment_api.create_grant(self.role1['id'], group_id=self.group1['id'], 
project_id=self.projectB['id']) self.assignment_api.create_grant(self.role2['id'], user_id=self.user4['id'], project_id=self.projectC['id']) self.assignment_api.create_grant(self.role1['id'], user_id=self.user6['id'], project_id=self.projectA['id']) self.assignment_api.create_grant(self.role1['id'], user_id=self.user6['id'], domain_id=self.domainA['id']) def test_deleting_role_revokes_token(self): """Test deleting a role revokes token. Add some additional test data, namely: - A third project (project C) - Three additional users - user4 owned by domainB and user5 and 6 owned by domainA (different domain ownership should not affect the test results, just provided to broaden test coverage) - User5 is a member of group1 - Group1 gets an additional assignment - role1 on projectB as well as its existing role1 on projectA - User4 has role2 on Project C - User6 has role1 on projectA and domainA - This allows us to create 5 tokens by virtue of different types of role assignment: - user1, scoped to ProjectA by virtue of user role1 assignment - user5, scoped to ProjectB by virtue of group role1 assignment - user4, scoped to ProjectC by virtue of user role2 assignment - user6, scoped to ProjectA by virtue of user role1 assignment - user6, scoped to DomainA by virtue of user role1 assignment - role1 is then deleted - Check the tokens on Project A and B, and DomainA are revoked, but not the one for Project C """ self.role_data_fixtures() # Now we are ready to start issuing requests auth_data = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], project_id=self.projectA['id']) tokenA = self.get_requested_token(auth_data) auth_data = self.build_authentication_request( user_id=self.user5['id'], password=self.user5['password'], project_id=self.projectB['id']) tokenB = self.get_requested_token(auth_data) auth_data = self.build_authentication_request( user_id=self.user4['id'], password=self.user4['password'], project_id=self.projectC['id']) 
tokenC = self.get_requested_token(auth_data) auth_data = self.build_authentication_request( user_id=self.user6['id'], password=self.user6['password'], project_id=self.projectA['id']) tokenD = self.get_requested_token(auth_data) auth_data = self.build_authentication_request( user_id=self.user6['id'], password=self.user6['password'], domain_id=self.domainA['id']) tokenE = self.get_requested_token(auth_data) # Confirm tokens are valid self.head('/auth/tokens', headers={'X-Subject-Token': tokenA}, expected_status=http_client.OK) self.head('/auth/tokens', headers={'X-Subject-Token': tokenB}, expected_status=http_client.OK) self.head('/auth/tokens', headers={'X-Subject-Token': tokenC}, expected_status=http_client.OK) self.head('/auth/tokens', headers={'X-Subject-Token': tokenD}, expected_status=http_client.OK) self.head('/auth/tokens', headers={'X-Subject-Token': tokenE}, expected_status=http_client.OK) # Delete the role, which should invalidate the tokens role_url = '/roles/%s' % self.role1['id'] self.delete(role_url) # Check the tokens that used role1 is invalid self.head('/auth/tokens', headers={'X-Subject-Token': tokenA}, expected_status=http_client.NOT_FOUND) self.head('/auth/tokens', headers={'X-Subject-Token': tokenB}, expected_status=http_client.NOT_FOUND) self.head('/auth/tokens', headers={'X-Subject-Token': tokenD}, expected_status=http_client.NOT_FOUND) self.head('/auth/tokens', headers={'X-Subject-Token': tokenE}, expected_status=http_client.NOT_FOUND) # ...but the one using role2 is still valid self.head('/auth/tokens', headers={'X-Subject-Token': tokenC}, expected_status=http_client.OK) def test_domain_user_role_assignment_maintains_token(self): """Test user-domain role assignment maintains existing token. 
Test Plan: - Get a token for user1, scoped to ProjectA - Create a grant for user1 on DomainB - Check token is still valid """ auth_data = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], project_id=self.projectA['id']) token = self.get_requested_token(auth_data) # Confirm token is valid self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.OK) # Assign a role, which should not affect the token grant_url = ( '/domains/%(domain_id)s/users/%(user_id)s/' 'roles/%(role_id)s' % { 'domain_id': self.domainB['id'], 'user_id': self.user1['id'], 'role_id': self.role1['id']}) self.put(grant_url) self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.OK) def test_disabling_project_revokes_token(self): token = self.get_requested_token( self.build_authentication_request( user_id=self.user3['id'], password=self.user3['password'], project_id=self.projectA['id'])) # confirm token is valid self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.OK) # disable the project, which should invalidate the token self.patch( '/projects/%(project_id)s' % {'project_id': self.projectA['id']}, body={'project': {'enabled': False}}) # user should no longer have access to the project self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.NOT_FOUND) self.v3_create_token( self.build_authentication_request( user_id=self.user3['id'], password=self.user3['password'], project_id=self.projectA['id']), expected_status=http_client.UNAUTHORIZED) def test_deleting_project_revokes_token(self): token = self.get_requested_token( self.build_authentication_request( user_id=self.user3['id'], password=self.user3['password'], project_id=self.projectA['id'])) # confirm token is valid self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.OK) # delete the project, which should invalidate the token 
self.delete( '/projects/%(project_id)s' % {'project_id': self.projectA['id']}) # user should no longer have access to the project self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.NOT_FOUND) self.v3_create_token( self.build_authentication_request( user_id=self.user3['id'], password=self.user3['password'], project_id=self.projectA['id']), expected_status=http_client.UNAUTHORIZED) def test_deleting_group_grant_revokes_tokens(self): """Test deleting a group grant revokes tokens. Test Plan: - Get a token for user1, scoped to ProjectA - Get a token for user2, scoped to ProjectA - Get a token for user3, scoped to ProjectA - Delete the grant group1 has on ProjectA - Check tokens for user1 & user2 are no longer valid, since user1 and user2 are members of group1 - Check token for user3 is invalid too """ auth_data = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], project_id=self.projectA['id']) token1 = self.get_requested_token(auth_data) auth_data = self.build_authentication_request( user_id=self.user2['id'], password=self.user2['password'], project_id=self.projectA['id']) token2 = self.get_requested_token(auth_data) auth_data = self.build_authentication_request( user_id=self.user3['id'], password=self.user3['password'], project_id=self.projectA['id']) token3 = self.get_requested_token(auth_data) # Confirm tokens are valid self.head('/auth/tokens', headers={'X-Subject-Token': token1}, expected_status=http_client.OK) self.head('/auth/tokens', headers={'X-Subject-Token': token2}, expected_status=http_client.OK) self.head('/auth/tokens', headers={'X-Subject-Token': token3}, expected_status=http_client.OK) # Delete the group grant, which should invalidate the # tokens for user1 and user2 grant_url = ( '/projects/%(project_id)s/groups/%(group_id)s/' 'roles/%(role_id)s' % { 'project_id': self.projectA['id'], 'group_id': self.group1['id'], 'role_id': self.role1['id']}) self.delete(grant_url) 
self.head('/auth/tokens', headers={'X-Subject-Token': token1}, expected_status=http_client.NOT_FOUND) self.head('/auth/tokens', headers={'X-Subject-Token': token2}, expected_status=http_client.NOT_FOUND) # But user3's token should be invalid too as revocation is done for # scope role & project self.head('/auth/tokens', headers={'X-Subject-Token': token3}, expected_status=http_client.NOT_FOUND) def test_domain_group_role_assignment_maintains_token(self): """Test domain-group role assignment maintains existing token. Test Plan: - Get a token for user1, scoped to ProjectA - Create a grant for group1 on DomainB - Check token is still longer valid """ auth_data = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], project_id=self.projectA['id']) token = self.get_requested_token(auth_data) # Confirm token is valid self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.OK) # Delete the grant, which should invalidate the token grant_url = ( '/domains/%(domain_id)s/groups/%(group_id)s/' 'roles/%(role_id)s' % { 'domain_id': self.domainB['id'], 'group_id': self.group1['id'], 'role_id': self.role1['id']}) self.put(grant_url) self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.OK) def test_group_membership_changes_revokes_token(self): """Test add/removal to/from group revokes token. 
Test Plan: - Get a token for user1, scoped to ProjectA - Get a token for user2, scoped to ProjectA - Remove user1 from group1 - Check token for user1 is no longer valid - Check token for user2 is still valid, even though user2 is also part of group1 - Add user2 to group2 - Check token for user2 is now no longer valid """ auth_data = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], project_id=self.projectA['id']) token1 = self.get_requested_token(auth_data) auth_data = self.build_authentication_request( user_id=self.user2['id'], password=self.user2['password'], project_id=self.projectA['id']) token2 = self.get_requested_token(auth_data) # Confirm tokens are valid self.head('/auth/tokens', headers={'X-Subject-Token': token1}, expected_status=http_client.OK) self.head('/auth/tokens', headers={'X-Subject-Token': token2}, expected_status=http_client.OK) # Remove user1 from group1, which should invalidate # the token self.delete('/groups/%(group_id)s/users/%(user_id)s' % { 'group_id': self.group1['id'], 'user_id': self.user1['id']}) self.head('/auth/tokens', headers={'X-Subject-Token': token1}, expected_status=http_client.NOT_FOUND) # But user2's token should still be valid self.head('/auth/tokens', headers={'X-Subject-Token': token2}, expected_status=http_client.OK) # Adding user2 to a group should not invalidate token self.put('/groups/%(group_id)s/users/%(user_id)s' % { 'group_id': self.group2['id'], 'user_id': self.user2['id']}) self.head('/auth/tokens', headers={'X-Subject-Token': token2}, expected_status=http_client.OK) def test_removing_role_assignment_does_not_affect_other_users(self): """Revoking a role from one user should not affect other users.""" # This group grant is not needed for the test self.delete( '/projects/%(project_id)s/groups/%(group_id)s/roles/%(role_id)s' % {'project_id': self.projectA['id'], 'group_id': self.group1['id'], 'role_id': self.role1['id']}) user1_token = self.get_requested_token( 
self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], project_id=self.projectA['id'])) user3_token = self.get_requested_token( self.build_authentication_request( user_id=self.user3['id'], password=self.user3['password'], project_id=self.projectA['id'])) # delete relationships between user1 and projectA from setUp self.delete( '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % { 'project_id': self.projectA['id'], 'user_id': self.user1['id'], 'role_id': self.role1['id']}) # authorization for the first user should now fail self.head('/auth/tokens', headers={'X-Subject-Token': user1_token}, expected_status=http_client.NOT_FOUND) self.v3_create_token( self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], project_id=self.projectA['id']), expected_status=http_client.UNAUTHORIZED) # authorization for the second user should still succeed self.head('/auth/tokens', headers={'X-Subject-Token': user3_token}, expected_status=http_client.OK) self.v3_create_token( self.build_authentication_request( user_id=self.user3['id'], password=self.user3['password'], project_id=self.projectA['id'])) def test_deleting_project_deletes_grants(self): # This is to make it a little bit more pretty with PEP8 role_path = ('/projects/%(project_id)s/users/%(user_id)s/' 'roles/%(role_id)s') role_path = role_path % {'user_id': self.user['id'], 'project_id': self.projectA['id'], 'role_id': self.role['id']} # grant the user a role on the project self.put(role_path) # delete the project, which should remove the roles self.delete( '/projects/%(project_id)s' % {'project_id': self.projectA['id']}) # Make sure that we get a 404 Not Found when heading that role. 
self.head(role_path, expected_status=http_client.NOT_FOUND) def get_v2_token(self, token=None, project_id=None): body = {'auth': {}, } if token: body['auth']['token'] = { 'id': token } else: body['auth']['passwordCredentials'] = { 'username': self.default_domain_user['name'], 'password': self.default_domain_user['password'], } if project_id: body['auth']['tenantId'] = project_id r = self.admin_request(method='POST', path='/v2.0/tokens', body=body) return r.json_body['access']['token']['id'] def test_revoke_v2_token_no_check(self): # Test that a V2 token can be revoked without validating it first. token = self.get_v2_token() self.delete('/auth/tokens', headers={'X-Subject-Token': token}) self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.NOT_FOUND) def test_revoke_token_from_token(self): # Test that a scoped token can be requested from an unscoped token, # the scoped token can be revoked, and the unscoped token remains # valid. unscoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'])) # Get a project-scoped token from the unscoped token project_scoped_token = self.get_requested_token( self.build_authentication_request( token=unscoped_token, project_id=self.projectA['id'])) # Get a domain-scoped token from the unscoped token domain_scoped_token = self.get_requested_token( self.build_authentication_request( token=unscoped_token, domain_id=self.domainA['id'])) # revoke the project-scoped token. self.delete('/auth/tokens', headers={'X-Subject-Token': project_scoped_token}) # The project-scoped token is invalidated. self.head('/auth/tokens', headers={'X-Subject-Token': project_scoped_token}, expected_status=http_client.NOT_FOUND) # The unscoped token should still be valid. self.head('/auth/tokens', headers={'X-Subject-Token': unscoped_token}, expected_status=http_client.OK) # The domain-scoped token should still be valid. 
self.head('/auth/tokens', headers={'X-Subject-Token': domain_scoped_token}, expected_status=http_client.OK) # revoke the domain-scoped token. self.delete('/auth/tokens', headers={'X-Subject-Token': domain_scoped_token}) # The domain-scoped token is invalid. self.head('/auth/tokens', headers={'X-Subject-Token': domain_scoped_token}, expected_status=http_client.NOT_FOUND) # The unscoped token should still be valid. self.head('/auth/tokens', headers={'X-Subject-Token': unscoped_token}, expected_status=http_client.OK) def test_revoke_token_from_token_v2(self): # Test that a scoped token can be requested from an unscoped token, # the scoped token can be revoked, and the unscoped token remains # valid. unscoped_token = self.get_v2_token() # Get a project-scoped token from the unscoped token project_scoped_token = self.get_v2_token( token=unscoped_token, project_id=self.default_domain_project['id']) # revoke the project-scoped token. self.delete('/auth/tokens', headers={'X-Subject-Token': project_scoped_token}) # The project-scoped token is invalidated. self.head('/auth/tokens', headers={'X-Subject-Token': project_scoped_token}, expected_status=http_client.NOT_FOUND) # The unscoped token should still be valid. self.head('/auth/tokens', headers={'X-Subject-Token': unscoped_token}, expected_status=http_client.OK) class TestTokenRevokeByAssignment(TestTokenRevokeById): def config_overrides(self): super(TestTokenRevokeById, self).config_overrides() self.config_fixture.config( group='token', provider='uuid', revoke_by_id=True) def test_removing_role_assignment_keeps_other_project_token_groups(self): """Test assignment isolation. 
Revoking a group role from one project should not invalidate all group users' tokens """ self.assignment_api.create_grant(self.role1['id'], group_id=self.group1['id'], project_id=self.projectB['id']) project_token = self.get_requested_token( self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], project_id=self.projectB['id'])) other_project_token = self.get_requested_token( self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password'], project_id=self.projectA['id'])) self.assignment_api.delete_grant(self.role1['id'], group_id=self.group1['id'], project_id=self.projectB['id']) # authorization for the projectA should still succeed self.head('/auth/tokens', headers={'X-Subject-Token': other_project_token}, expected_status=http_client.OK) # while token for the projectB should not self.head('/auth/tokens', headers={'X-Subject-Token': project_token}, expected_status=http_client.NOT_FOUND) revoked_tokens = [ t['id'] for t in self.token_provider_api.list_revoked_tokens()] # token is in token revocation list self.assertIn(project_token, revoked_tokens) class RevokeContribTests(test_v3.RestfulTestCase): @mock.patch.object(versionutils, 'report_deprecated_feature') def test_exception_happens(self, mock_deprecator): routers.RevokeExtension(mock.ANY) mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY) args, _kwargs = mock_deprecator.call_args self.assertIn("Remove revoke_extension from", args[1]) class TestTokenRevokeApi(TestTokenRevokeById): """Test token revocation on the v3 Identity API.""" def config_overrides(self): super(TestTokenRevokeApi, self).config_overrides() self.config_fixture.config( group='token', provider='pki', revoke_by_id=False) def assertValidDeletedProjectResponse(self, events_response, project_id): events = events_response['events'] self.assertEqual(1, len(events)) self.assertEqual(project_id, events[0]['project_id']) self.assertIsNotNone(events[0]['issued_before']) 
self.assertIsNotNone(events_response['links']) del (events_response['events'][0]['issued_before']) del (events_response['links']) expected_response = {'events': [{'project_id': project_id}]} self.assertEqual(expected_response, events_response) def assertDomainAndProjectInList(self, events_response, domain_id): events = events_response['events'] self.assertEqual(2, len(events)) self.assertEqual(domain_id, events[0]['project_id']) self.assertEqual(domain_id, events[1]['domain_id']) self.assertIsNotNone(events[0]['issued_before']) self.assertIsNotNone(events[1]['issued_before']) self.assertIsNotNone(events_response['links']) del (events_response['events'][0]['issued_before']) del (events_response['events'][1]['issued_before']) del (events_response['links']) expected_response = {'events': [{'project_id': domain_id}, {'domain_id': domain_id}]} self.assertEqual(expected_response, events_response) def assertValidRevokedTokenResponse(self, events_response, **kwargs): events = events_response['events'] self.assertEqual(1, len(events)) for k, v in kwargs.items(): self.assertEqual(v, events[0].get(k)) self.assertIsNotNone(events[0]['issued_before']) self.assertIsNotNone(events_response['links']) del (events_response['events'][0]['issued_before']) del (events_response['links']) expected_response = {'events': [kwargs]} self.assertEqual(expected_response, events_response) def test_revoke_token(self): scoped_token = self.get_scoped_token() headers = {'X-Subject-Token': scoped_token} response = self.get('/auth/tokens', headers=headers).json_body['token'] self.delete('/auth/tokens', headers=headers) self.head('/auth/tokens', headers=headers, expected_status=http_client.NOT_FOUND) events_response = self.get('/OS-REVOKE/events').json_body self.assertValidRevokedTokenResponse(events_response, audit_id=response['audit_ids'][0]) def test_revoke_v2_token(self): token = self.get_v2_token() headers = {'X-Subject-Token': token} response = self.get('/auth/tokens', 
headers=headers).json_body['token'] self.delete('/auth/tokens', headers=headers) self.head('/auth/tokens', headers=headers, expected_status=http_client.NOT_FOUND) events_response = self.get('/OS-REVOKE/events').json_body self.assertValidRevokedTokenResponse( events_response, audit_id=response['audit_ids'][0]) def test_revoke_by_id_false_returns_gone(self): self.get('/auth/tokens/OS-PKI/revoked', expected_status=http_client.GONE) def test_list_delete_project_shows_in_event_list(self): self.role_data_fixtures() events = self.get('/OS-REVOKE/events').json_body['events'] self.assertEqual([], events) self.delete( '/projects/%(project_id)s' % {'project_id': self.projectA['id']}) events_response = self.get('/OS-REVOKE/events').json_body self.assertValidDeletedProjectResponse(events_response, self.projectA['id']) def test_disable_domain_shows_in_event_list(self): events = self.get('/OS-REVOKE/events').json_body['events'] self.assertEqual([], events) disable_body = {'domain': {'enabled': False}} self.patch( '/domains/%(project_id)s' % {'project_id': self.domainA['id']}, body=disable_body) events = self.get('/OS-REVOKE/events').json_body self.assertDomainAndProjectInList(events, self.domainA['id']) def assertEventDataInList(self, events, **kwargs): found = False for e in events: for key, value in kwargs.items(): try: if e[key] != value: break except KeyError: # Break the loop and present a nice error instead of # KeyError break else: # If the value of the event[key] matches the value of the kwarg # for each item in kwargs, the event was fully matched and # the assertTrue below should succeed. found = True self.assertTrue(found, 'event with correct values not in list, expected to ' 'find event with key-value pairs. 
Expected: ' '"%(expected)s" Events: "%(events)s"' % {'expected': ','.join( ["'%s=%s'" % (k, v) for k, v in kwargs.items()]), 'events': events}) def test_list_delete_token_shows_in_event_list(self): self.role_data_fixtures() events = self.get('/OS-REVOKE/events').json_body['events'] self.assertEqual([], events) scoped_token = self.get_scoped_token() headers = {'X-Subject-Token': scoped_token} auth_req = self.build_authentication_request(token=scoped_token) response = self.v3_create_token(auth_req) token2 = response.json_body['token'] headers2 = {'X-Subject-Token': response.headers['X-Subject-Token']} response = self.v3_create_token(auth_req) response.json_body['token'] headers3 = {'X-Subject-Token': response.headers['X-Subject-Token']} self.head('/auth/tokens', headers=headers, expected_status=http_client.OK) self.head('/auth/tokens', headers=headers2, expected_status=http_client.OK) self.head('/auth/tokens', headers=headers3, expected_status=http_client.OK) self.delete('/auth/tokens', headers=headers) # NOTE(ayoung): not deleting token3, as it should be deleted # by previous events_response = self.get('/OS-REVOKE/events').json_body events = events_response['events'] self.assertEqual(1, len(events)) self.assertEventDataInList( events, audit_id=token2['audit_ids'][1]) self.head('/auth/tokens', headers=headers, expected_status=http_client.NOT_FOUND) self.head('/auth/tokens', headers=headers2, expected_status=http_client.OK) self.head('/auth/tokens', headers=headers3, expected_status=http_client.OK) def test_list_with_filter(self): self.role_data_fixtures() events = self.get('/OS-REVOKE/events').json_body['events'] self.assertEqual(0, len(events)) scoped_token = self.get_scoped_token() headers = {'X-Subject-Token': scoped_token} auth = self.build_authentication_request(token=scoped_token) headers2 = {'X-Subject-Token': self.get_requested_token(auth)} self.delete('/auth/tokens', headers=headers) self.delete('/auth/tokens', headers=headers2) events = 
self.get('/OS-REVOKE/events').json_body['events'] self.assertEqual(2, len(events)) future = utils.isotime(timeutils.utcnow() + datetime.timedelta(seconds=1000)) events = self.get('/OS-REVOKE/events?since=%s' % (future) ).json_body['events'] self.assertEqual(0, len(events)) class TestAuthExternalDisabled(test_v3.RestfulTestCase): def config_overrides(self): super(TestAuthExternalDisabled, self).config_overrides() self.config_fixture.config( group='auth', methods=['password', 'token']) def test_remote_user_disabled(self): api = auth.controllers.Auth() remote_user = '%s@%s' % (self.user['name'], self.domain['name']) context, auth_info, auth_context = self.build_external_auth_request( remote_user) self.assertRaises(exception.Unauthorized, api.authenticate, context, auth_info, auth_context) class TestAuthExternalDomain(test_v3.RestfulTestCase): content_type = 'json' def config_overrides(self): super(TestAuthExternalDomain, self).config_overrides() self.kerberos = False self.auth_plugin_config_override(external='Domain') def test_remote_user_with_realm(self): api = auth.controllers.Auth() remote_user = self.user['name'] remote_domain = self.domain['name'] context, auth_info, auth_context = self.build_external_auth_request( remote_user, remote_domain=remote_domain, kerberos=self.kerberos) api.authenticate(context, auth_info, auth_context) self.assertEqual(self.user['id'], auth_context['user_id']) # Now test to make sure the user name can, itself, contain the # '@' character. 
user = {'name': 'myname@mydivision'} self.identity_api.update_user(self.user['id'], user) remote_user = user['name'] context, auth_info, auth_context = self.build_external_auth_request( remote_user, remote_domain=remote_domain, kerberos=self.kerberos) api.authenticate(context, auth_info, auth_context) self.assertEqual(self.user['id'], auth_context['user_id']) def test_project_id_scoped_with_remote_user(self): self.config_fixture.config(group='token', bind=['kerberos']) auth_data = self.build_authentication_request( project_id=self.project['id'], kerberos=self.kerberos) remote_user = self.user['name'] remote_domain = self.domain['name'] self.admin_app.extra_environ.update({'REMOTE_USER': remote_user, 'REMOTE_DOMAIN': remote_domain, 'AUTH_TYPE': 'Negotiate'}) r = self.v3_create_token(auth_data) token = self.assertValidProjectScopedTokenResponse(r) self.assertEqual(self.user['name'], token['bind']['kerberos']) def test_unscoped_bind_with_remote_user(self): self.config_fixture.config(group='token', bind=['kerberos']) auth_data = self.build_authentication_request(kerberos=self.kerberos) remote_user = self.user['name'] remote_domain = self.domain['name'] self.admin_app.extra_environ.update({'REMOTE_USER': remote_user, 'REMOTE_DOMAIN': remote_domain, 'AUTH_TYPE': 'Negotiate'}) r = self.v3_create_token(auth_data) token = self.assertValidUnscopedTokenResponse(r) self.assertEqual(self.user['name'], token['bind']['kerberos']) class TestAuthExternalDefaultDomain(test_v3.RestfulTestCase): content_type = 'json' def config_overrides(self): super(TestAuthExternalDefaultDomain, self).config_overrides() self.kerberos = False self.auth_plugin_config_override( external='keystone.auth.plugins.external.DefaultDomain') def test_remote_user_with_default_domain(self): api = auth.controllers.Auth() remote_user = self.default_domain_user['name'] context, auth_info, auth_context = self.build_external_auth_request( remote_user, kerberos=self.kerberos) api.authenticate(context, auth_info, 
auth_context) self.assertEqual(self.default_domain_user['id'], auth_context['user_id']) # Now test to make sure the user name can, itself, contain the # '@' character. user = {'name': 'myname@mydivision'} self.identity_api.update_user(self.default_domain_user['id'], user) remote_user = user['name'] context, auth_info, auth_context = self.build_external_auth_request( remote_user, kerberos=self.kerberos) api.authenticate(context, auth_info, auth_context) self.assertEqual(self.default_domain_user['id'], auth_context['user_id']) def test_project_id_scoped_with_remote_user(self): self.config_fixture.config(group='token', bind=['kerberos']) auth_data = self.build_authentication_request( project_id=self.default_domain_project['id'], kerberos=self.kerberos) remote_user = self.default_domain_user['name'] self.admin_app.extra_environ.update({'REMOTE_USER': remote_user, 'AUTH_TYPE': 'Negotiate'}) r = self.v3_create_token(auth_data) token = self.assertValidProjectScopedTokenResponse(r) self.assertEqual(self.default_domain_user['name'], token['bind']['kerberos']) def test_unscoped_bind_with_remote_user(self): self.config_fixture.config(group='token', bind=['kerberos']) auth_data = self.build_authentication_request(kerberos=self.kerberos) remote_user = self.default_domain_user['name'] self.admin_app.extra_environ.update({'REMOTE_USER': remote_user, 'AUTH_TYPE': 'Negotiate'}) r = self.v3_create_token(auth_data) token = self.assertValidUnscopedTokenResponse(r) self.assertEqual(self.default_domain_user['name'], token['bind']['kerberos']) class TestAuthKerberos(TestAuthExternalDomain): def config_overrides(self): super(TestAuthKerberos, self).config_overrides() self.kerberos = True self.auth_plugin_config_override( methods=['kerberos', 'password', 'token']) class TestAuth(test_v3.RestfulTestCase): def test_unscoped_token_with_user_id(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password']) r = self.v3_create_token(auth_data) 
self.assertValidUnscopedTokenResponse(r) def test_unscoped_token_with_user_domain_id(self): auth_data = self.build_authentication_request( username=self.user['name'], user_domain_id=self.domain['id'], password=self.user['password']) r = self.v3_create_token(auth_data) self.assertValidUnscopedTokenResponse(r) def test_unscoped_token_with_user_domain_name(self): auth_data = self.build_authentication_request( username=self.user['name'], user_domain_name=self.domain['name'], password=self.user['password']) r = self.v3_create_token(auth_data) self.assertValidUnscopedTokenResponse(r) def test_project_id_scoped_token_with_user_id(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) r = self.v3_create_token(auth_data) self.assertValidProjectScopedTokenResponse(r) def _second_project_as_default(self): ref = unit.new_project_ref(domain_id=self.domain_id) r = self.post('/projects', body={'project': ref}) project = self.assertValidProjectResponse(r, ref) # grant the user a role on the project self.put( '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % { 'user_id': self.user['id'], 'project_id': project['id'], 'role_id': self.role['id']}) # set the user's preferred project body = {'user': {'default_project_id': project['id']}} r = self.patch('/users/%(user_id)s' % { 'user_id': self.user['id']}, body=body) self.assertValidUserResponse(r) return project def test_default_project_id_scoped_token_with_user_id(self): project = self._second_project_as_default() # attempt to authenticate without requesting a project auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password']) r = self.v3_create_token(auth_data) self.assertValidProjectScopedTokenResponse(r) self.assertEqual(project['id'], r.result['token']['project']['id']) def test_default_project_id_scoped_token_with_user_id_no_catalog(self): project = self._second_project_as_default() # 
attempt to authenticate without requesting a project auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password']) r = self.post('/auth/tokens?nocatalog', body=auth_data, noauth=True) self.assertValidProjectScopedTokenResponse(r, require_catalog=False) self.assertEqual(project['id'], r.result['token']['project']['id']) def test_explicit_unscoped_token(self): self._second_project_as_default() # attempt to authenticate without requesting a project auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], unscoped="unscoped") r = self.post('/auth/tokens', body=auth_data, noauth=True) self.assertIsNone(r.result['token'].get('project')) self.assertIsNone(r.result['token'].get('domain')) self.assertIsNone(r.result['token'].get('scope')) def test_implicit_project_id_scoped_token_with_user_id_no_catalog(self): # attempt to authenticate without requesting a project auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) r = self.post('/auth/tokens?nocatalog', body=auth_data, noauth=True) self.assertValidProjectScopedTokenResponse(r, require_catalog=False) self.assertEqual(self.project['id'], r.result['token']['project']['id']) def test_auth_catalog_attributes(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) r = self.v3_create_token(auth_data) catalog = r.result['token']['catalog'] self.assertEqual(1, len(catalog)) catalog = catalog[0] self.assertEqual(self.service['id'], catalog['id']) self.assertEqual(self.service['name'], catalog['name']) self.assertEqual(self.service['type'], catalog['type']) endpoint = catalog['endpoints'] self.assertEqual(1, len(endpoint)) endpoint = endpoint[0] self.assertEqual(self.endpoint['id'], endpoint['id']) self.assertEqual(self.endpoint['interface'], endpoint['interface']) 
self.assertEqual(self.endpoint['region_id'], endpoint['region_id']) self.assertEqual(self.endpoint['url'], endpoint['url']) def _check_disabled_endpoint_result(self, catalog, disabled_endpoint_id): endpoints = catalog[0]['endpoints'] endpoint_ids = [ep['id'] for ep in endpoints] self.assertEqual([self.endpoint_id], endpoint_ids) def test_auth_catalog_disabled_service(self): """On authenticate, get a catalog that excludes disabled services.""" # although the child endpoint is enabled, the service is disabled self.assertTrue(self.endpoint['enabled']) self.catalog_api.update_service( self.endpoint['service_id'], {'enabled': False}) service = self.catalog_api.get_service(self.endpoint['service_id']) self.assertFalse(service['enabled']) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) r = self.v3_create_token(auth_data) self.assertEqual([], r.result['token']['catalog']) def test_auth_catalog_disabled_endpoint(self): """On authenticate, get a catalog that excludes disabled endpoints.""" # Create a disabled endpoint that's like the enabled one. 
disabled_endpoint_ref = copy.copy(self.endpoint) disabled_endpoint_id = uuid.uuid4().hex disabled_endpoint_ref.update({ 'id': disabled_endpoint_id, 'enabled': False, 'interface': 'internal' }) self.catalog_api.create_endpoint(disabled_endpoint_id, disabled_endpoint_ref) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) r = self.v3_create_token(auth_data) self._check_disabled_endpoint_result(r.result['token']['catalog'], disabled_endpoint_id) def test_project_id_scoped_token_with_user_id_unauthorized(self): project = unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(project['id'], project) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=project['id']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_user_and_group_roles_scoped_token(self): """Test correct roles are returned in scoped token. 
Test Plan: - Create a domain, with 1 project, 2 users (user1 and user2) and 2 groups (group1 and group2) - Make user1 a member of group1, user2 a member of group2 - Create 8 roles, assigning them to each of the 8 combinations of users/groups on domain/project - Get a project scoped token for user1, checking that the right two roles are returned (one directly assigned, one by virtue of group membership) - Repeat this for a domain scoped token - Make user1 also a member of group2 - Get another scoped token making sure the additional role shows up - User2 is just here as a spoiler, to make sure we don't get any roles uniquely assigned to it returned in any of our tokens """ domainA = unit.new_domain_ref() self.resource_api.create_domain(domainA['id'], domainA) projectA = unit.new_project_ref(domain_id=domainA['id']) self.resource_api.create_project(projectA['id'], projectA) user1 = unit.create_user(self.identity_api, domain_id=domainA['id']) user2 = unit.create_user(self.identity_api, domain_id=domainA['id']) group1 = unit.new_group_ref(domain_id=domainA['id']) group1 = self.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domainA['id']) group2 = self.identity_api.create_group(group2) self.identity_api.add_user_to_group(user1['id'], group1['id']) self.identity_api.add_user_to_group(user2['id'], group2['id']) # Now create all the roles and assign them role_list = [] for _ in range(8): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) self.assignment_api.create_grant(role_list[0]['id'], user_id=user1['id'], domain_id=domainA['id']) self.assignment_api.create_grant(role_list[1]['id'], user_id=user1['id'], project_id=projectA['id']) self.assignment_api.create_grant(role_list[2]['id'], user_id=user2['id'], domain_id=domainA['id']) self.assignment_api.create_grant(role_list[3]['id'], user_id=user2['id'], project_id=projectA['id']) self.assignment_api.create_grant(role_list[4]['id'], group_id=group1['id'], 
domain_id=domainA['id']) self.assignment_api.create_grant(role_list[5]['id'], group_id=group1['id'], project_id=projectA['id']) self.assignment_api.create_grant(role_list[6]['id'], group_id=group2['id'], domain_id=domainA['id']) self.assignment_api.create_grant(role_list[7]['id'], group_id=group2['id'], project_id=projectA['id']) # First, get a project scoped token - which should # contain the direct user role and the one by virtue # of group membership auth_data = self.build_authentication_request( user_id=user1['id'], password=user1['password'], project_id=projectA['id']) r = self.v3_create_token(auth_data) token = self.assertValidScopedTokenResponse(r) roles_ids = [] for ref in token['roles']: roles_ids.append(ref['id']) self.assertEqual(2, len(token['roles'])) self.assertIn(role_list[1]['id'], roles_ids) self.assertIn(role_list[5]['id'], roles_ids) # Now the same thing for a domain scoped token auth_data = self.build_authentication_request( user_id=user1['id'], password=user1['password'], domain_id=domainA['id']) r = self.v3_create_token(auth_data) token = self.assertValidScopedTokenResponse(r) roles_ids = [] for ref in token['roles']: roles_ids.append(ref['id']) self.assertEqual(2, len(token['roles'])) self.assertIn(role_list[0]['id'], roles_ids) self.assertIn(role_list[4]['id'], roles_ids) # Finally, add user1 to the 2nd group, and get a new # scoped token - the extra role should now be included # by virtue of the 2nd group self.identity_api.add_user_to_group(user1['id'], group2['id']) auth_data = self.build_authentication_request( user_id=user1['id'], password=user1['password'], project_id=projectA['id']) r = self.v3_create_token(auth_data) token = self.assertValidScopedTokenResponse(r) roles_ids = [] for ref in token['roles']: roles_ids.append(ref['id']) self.assertEqual(3, len(token['roles'])) self.assertIn(role_list[1]['id'], roles_ids) self.assertIn(role_list[5]['id'], roles_ids) self.assertIn(role_list[7]['id'], roles_ids) def 
test_auth_token_cross_domain_group_and_project(self): """Verify getting a token in cross domain group/project roles.""" # create domain, project and group and grant roles to user domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) project1 = unit.new_project_ref(domain_id=domain1['id']) self.resource_api.create_project(project1['id'], project1) user_foo = unit.create_user(self.identity_api, domain_id=test_v3.DEFAULT_DOMAIN_ID) role_member = unit.new_role_ref() self.role_api.create_role(role_member['id'], role_member) role_admin = unit.new_role_ref() self.role_api.create_role(role_admin['id'], role_admin) role_foo_domain1 = unit.new_role_ref() self.role_api.create_role(role_foo_domain1['id'], role_foo_domain1) role_group_domain1 = unit.new_role_ref() self.role_api.create_role(role_group_domain1['id'], role_group_domain1) self.assignment_api.add_user_to_project(project1['id'], user_foo['id']) new_group = unit.new_group_ref(domain_id=domain1['id']) new_group = self.identity_api.create_group(new_group) self.identity_api.add_user_to_group(user_foo['id'], new_group['id']) self.assignment_api.create_grant( user_id=user_foo['id'], project_id=project1['id'], role_id=role_member['id']) self.assignment_api.create_grant( group_id=new_group['id'], project_id=project1['id'], role_id=role_admin['id']) self.assignment_api.create_grant( user_id=user_foo['id'], domain_id=domain1['id'], role_id=role_foo_domain1['id']) self.assignment_api.create_grant( group_id=new_group['id'], domain_id=domain1['id'], role_id=role_group_domain1['id']) # Get a scoped token for the project auth_data = self.build_authentication_request( username=user_foo['name'], user_domain_id=test_v3.DEFAULT_DOMAIN_ID, password=user_foo['password'], project_name=project1['name'], project_domain_id=domain1['id']) r = self.v3_create_token(auth_data) scoped_token = self.assertValidScopedTokenResponse(r) project = scoped_token["project"] roles_ids = [] for ref in scoped_token['roles']: 
roles_ids.append(ref['id']) self.assertEqual(project1['id'], project["id"]) self.assertIn(role_member['id'], roles_ids) self.assertIn(role_admin['id'], roles_ids) self.assertNotIn(role_foo_domain1['id'], roles_ids) self.assertNotIn(role_group_domain1['id'], roles_ids) def test_project_id_scoped_token_with_user_domain_id(self): auth_data = self.build_authentication_request( username=self.user['name'], user_domain_id=self.domain['id'], password=self.user['password'], project_id=self.project['id']) r = self.v3_create_token(auth_data) self.assertValidProjectScopedTokenResponse(r) def test_project_id_scoped_token_with_user_domain_name(self): auth_data = self.build_authentication_request( username=self.user['name'], user_domain_name=self.domain['name'], password=self.user['password'], project_id=self.project['id']) r = self.v3_create_token(auth_data) self.assertValidProjectScopedTokenResponse(r) def test_domain_id_scoped_token_with_user_id(self): path = '/domains/%s/users/%s/roles/%s' % ( self.domain['id'], self.user['id'], self.role['id']) self.put(path=path) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain['id']) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_domain_id_scoped_token_with_user_domain_id(self): path = '/domains/%s/users/%s/roles/%s' % ( self.domain['id'], self.user['id'], self.role['id']) self.put(path=path) auth_data = self.build_authentication_request( username=self.user['name'], user_domain_id=self.domain['id'], password=self.user['password'], domain_id=self.domain['id']) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_domain_id_scoped_token_with_user_domain_name(self): path = '/domains/%s/users/%s/roles/%s' % ( self.domain['id'], self.user['id'], self.role['id']) self.put(path=path) auth_data = self.build_authentication_request( username=self.user['name'], 
user_domain_name=self.domain['name'], password=self.user['password'], domain_id=self.domain['id']) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_domain_name_scoped_token_with_user_id(self): path = '/domains/%s/users/%s/roles/%s' % ( self.domain['id'], self.user['id'], self.role['id']) self.put(path=path) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_name=self.domain['name']) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_domain_name_scoped_token_with_user_domain_id(self): path = '/domains/%s/users/%s/roles/%s' % ( self.domain['id'], self.user['id'], self.role['id']) self.put(path=path) auth_data = self.build_authentication_request( username=self.user['name'], user_domain_id=self.domain['id'], password=self.user['password'], domain_name=self.domain['name']) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_domain_name_scoped_token_with_user_domain_name(self): path = '/domains/%s/users/%s/roles/%s' % ( self.domain['id'], self.user['id'], self.role['id']) self.put(path=path) auth_data = self.build_authentication_request( username=self.user['name'], user_domain_name=self.domain['name'], password=self.user['password'], domain_name=self.domain['name']) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_domain_scope_token_with_group_role(self): group = unit.new_group_ref(domain_id=self.domain_id) group = self.identity_api.create_group(group) # add user to group self.identity_api.add_user_to_group(self.user['id'], group['id']) # grant the domain role to group path = '/domains/%s/groups/%s/roles/%s' % ( self.domain['id'], group['id'], self.role['id']) self.put(path=path) # now get a domain-scoped token auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain['id']) r = 
self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_domain_scope_token_with_name(self): # grant the domain role to user path = '/domains/%s/users/%s/roles/%s' % ( self.domain['id'], self.user['id'], self.role['id']) self.put(path=path) # now get a domain-scoped token auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_name=self.domain['name']) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_domain_scope_failed(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain['id']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_auth_with_id(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password']) r = self.v3_create_token(auth_data) self.assertValidUnscopedTokenResponse(r) token = r.headers.get('X-Subject-Token') # test token auth auth_data = self.build_authentication_request(token=token) r = self.v3_create_token(auth_data) self.assertValidUnscopedTokenResponse(r) def get_v2_token(self, tenant_id=None): body = { 'auth': { 'passwordCredentials': { 'username': self.default_domain_user['name'], 'password': self.default_domain_user['password'], }, }, } r = self.admin_request(method='POST', path='/v2.0/tokens', body=body) return r def test_validate_v2_unscoped_token_with_v3_api(self): v2_token = self.get_v2_token().result['access']['token']['id'] auth_data = self.build_authentication_request(token=v2_token) r = self.v3_create_token(auth_data) self.assertValidUnscopedTokenResponse(r) def test_validate_v2_scoped_token_with_v3_api(self): v2_response = self.get_v2_token( tenant_id=self.default_domain_project['id']) result = v2_response.result v2_token = result['access']['token']['id'] auth_data = self.build_authentication_request( token=v2_token, 
project_id=self.default_domain_project['id']) r = self.v3_create_token(auth_data) self.assertValidScopedTokenResponse(r) def test_invalid_user_id(self): auth_data = self.build_authentication_request( user_id=uuid.uuid4().hex, password=self.user['password']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_invalid_user_name(self): auth_data = self.build_authentication_request( username=uuid.uuid4().hex, user_domain_id=self.domain['id'], password=self.user['password']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_invalid_domain_id(self): auth_data = self.build_authentication_request( username=self.user['name'], user_domain_id=uuid.uuid4().hex, password=self.user['password']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_invalid_domain_name(self): auth_data = self.build_authentication_request( username=self.user['name'], user_domain_name=uuid.uuid4().hex, password=self.user['password']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_invalid_password(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=uuid.uuid4().hex) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_remote_user_no_realm(self): api = auth.controllers.Auth() context, auth_info, auth_context = self.build_external_auth_request( self.default_domain_user['name']) api.authenticate(context, auth_info, auth_context) self.assertEqual(self.default_domain_user['id'], auth_context['user_id']) # Now test to make sure the user name can, itself, contain the # '@' character. 
user = {'name': 'myname@mydivision'} self.identity_api.update_user(self.default_domain_user['id'], user) context, auth_info, auth_context = self.build_external_auth_request( user["name"]) api.authenticate(context, auth_info, auth_context) self.assertEqual(self.default_domain_user['id'], auth_context['user_id']) def test_remote_user_no_domain(self): api = auth.controllers.Auth() context, auth_info, auth_context = self.build_external_auth_request( self.user['name']) self.assertRaises(exception.Unauthorized, api.authenticate, context, auth_info, auth_context) def test_remote_user_and_password(self): # both REMOTE_USER and password methods must pass. # note that they do not have to match api = auth.controllers.Auth() auth_data = self.build_authentication_request( user_domain_id=self.default_domain_user['domain_id'], username=self.default_domain_user['name'], password=self.default_domain_user['password'])['auth'] context, auth_info, auth_context = self.build_external_auth_request( self.default_domain_user['name'], auth_data=auth_data) api.authenticate(context, auth_info, auth_context) def test_remote_user_and_explicit_external(self): # both REMOTE_USER and password methods must pass. # note that they do not have to match auth_data = self.build_authentication_request( user_domain_id=self.domain['id'], username=self.user['name'], password=self.user['password'])['auth'] auth_data['identity']['methods'] = ["password", "external"] auth_data['identity']['external'] = {} api = auth.controllers.Auth() auth_info = auth.controllers.AuthInfo(None, auth_data) auth_context = {'extras': {}, 'method_names': []} self.assertRaises(exception.Unauthorized, api.authenticate, self.empty_context, auth_info, auth_context) def test_remote_user_bad_password(self): # both REMOTE_USER and password methods must pass. 
api = auth.controllers.Auth() auth_data = self.build_authentication_request( user_domain_id=self.domain['id'], username=self.user['name'], password='badpassword')['auth'] context, auth_info, auth_context = self.build_external_auth_request( self.default_domain_user['name'], auth_data=auth_data) self.assertRaises(exception.Unauthorized, api.authenticate, context, auth_info, auth_context) def test_bind_not_set_with_remote_user(self): self.config_fixture.config(group='token', bind=[]) auth_data = self.build_authentication_request() remote_user = self.default_domain_user['name'] self.admin_app.extra_environ.update({'REMOTE_USER': remote_user, 'AUTH_TYPE': 'Negotiate'}) r = self.v3_create_token(auth_data) token = self.assertValidUnscopedTokenResponse(r) self.assertNotIn('bind', token) # TODO(ayoung): move to TestPKITokenAPIs; it will be run for both formats def test_verify_with_bound_token(self): self.config_fixture.config(group='token', bind='kerberos') auth_data = self.build_authentication_request( project_id=self.project['id']) remote_user = self.default_domain_user['name'] self.admin_app.extra_environ.update({'REMOTE_USER': remote_user, 'AUTH_TYPE': 'Negotiate'}) token = self.get_requested_token(auth_data) headers = {'X-Subject-Token': token} r = self.get('/auth/tokens', headers=headers, token=token) token = self.assertValidProjectScopedTokenResponse(r) self.assertEqual(self.default_domain_user['name'], token['bind']['kerberos']) def test_auth_with_bind_token(self): self.config_fixture.config(group='token', bind=['kerberos']) auth_data = self.build_authentication_request() remote_user = self.default_domain_user['name'] self.admin_app.extra_environ.update({'REMOTE_USER': remote_user, 'AUTH_TYPE': 'Negotiate'}) r = self.v3_create_token(auth_data) # the unscoped token should have bind information in it token = self.assertValidUnscopedTokenResponse(r) self.assertEqual(remote_user, token['bind']['kerberos']) token = r.headers.get('X-Subject-Token') # using unscoped token 
with remote user succeeds auth_params = {'token': token, 'project_id': self.project_id} auth_data = self.build_authentication_request(**auth_params) r = self.v3_create_token(auth_data) token = self.assertValidProjectScopedTokenResponse(r) # the bind information should be carried over from the original token self.assertEqual(remote_user, token['bind']['kerberos']) def test_v2_v3_bind_token_intermix(self): self.config_fixture.config(group='token', bind='kerberos') # we need our own user registered to the default domain because of # the way external auth works. remote_user = self.default_domain_user['name'] self.admin_app.extra_environ.update({'REMOTE_USER': remote_user, 'AUTH_TYPE': 'Negotiate'}) body = {'auth': {}} resp = self.admin_request(path='/v2.0/tokens', method='POST', body=body) v2_token_data = resp.result bind = v2_token_data['access']['token']['bind'] self.assertEqual(self.default_domain_user['name'], bind['kerberos']) v2_token_id = v2_token_data['access']['token']['id'] # NOTE(gyee): self.get() will try to obtain an auth token if one # is not provided. When REMOTE_USER is present in the request # environment, the external user auth plugin is used in conjunction # with the password auth for the admin user. Therefore, we need to # cleanup the REMOTE_USER information from the previous call. 
del self.admin_app.extra_environ['REMOTE_USER'] headers = {'X-Subject-Token': v2_token_id} resp = self.get('/auth/tokens', headers=headers) token_data = resp.result self.assertDictEqual(v2_token_data['access']['token']['bind'], token_data['token']['bind']) def test_authenticating_a_user_with_no_password(self): user = unit.new_user_ref(domain_id=self.domain['id']) del user['password'] # can't have a password for this test user = self.identity_api.create_user(user) auth_data = self.build_authentication_request( user_id=user['id'], password='password') self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_disabled_default_project_result_in_unscoped_token(self): # create a disabled project to work with project = self.create_new_default_project_for_user( self.user['id'], self.domain_id, enable_project=False) # assign a role to user for the new project self.assignment_api.add_role_to_user_and_project(self.user['id'], project['id'], self.role_id) # attempt to authenticate without requesting a project auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password']) r = self.v3_create_token(auth_data) self.assertValidUnscopedTokenResponse(r) def test_disabled_default_project_domain_result_in_unscoped_token(self): domain_ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': domain_ref}) domain = self.assertValidDomainResponse(r, domain_ref) project = self.create_new_default_project_for_user( self.user['id'], domain['id']) # assign a role to user for the new project self.assignment_api.add_role_to_user_and_project(self.user['id'], project['id'], self.role_id) # now disable the project domain body = {'domain': {'enabled': False}} r = self.patch('/domains/%(domain_id)s' % {'domain_id': domain['id']}, body=body) self.assertValidDomainResponse(r) # attempt to authenticate without requesting a project auth_data = self.build_authentication_request( user_id=self.user['id'], 
password=self.user['password']) r = self.v3_create_token(auth_data) self.assertValidUnscopedTokenResponse(r) def test_no_access_to_default_project_result_in_unscoped_token(self): # create a disabled project to work with self.create_new_default_project_for_user(self.user['id'], self.domain_id) # attempt to authenticate without requesting a project auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password']) r = self.v3_create_token(auth_data) self.assertValidUnscopedTokenResponse(r) def test_disabled_scope_project_domain_result_in_401(self): # create a disabled domain domain = unit.new_domain_ref() domain = self.resource_api.create_domain(domain['id'], domain) # create a project in the domain project = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project['id'], project) # assign some role to self.user for the project in the domain self.assignment_api.add_role_to_user_and_project( self.user['id'], project['id'], self.role_id) # Disable the domain domain['enabled'] = False self.resource_api.update_domain(domain['id'], domain) # user should not be able to auth with project_id auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=project['id']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) # user should not be able to auth with project_name & domain auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_name=project['name'], project_domain_id=domain['id']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_auth_methods_with_different_identities_fails(self): # get the token for a user. This is self.user which is different from # self.default_domain_user. 
token = self.get_scoped_token() # try both password and token methods with different identities and it # should fail auth_data = self.build_authentication_request( token=token, user_id=self.default_domain_user['id'], password=self.default_domain_user['password']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_authenticate_fails_if_project_unsafe(self): """Verify authenticate to a project with unsafe name fails.""" # Start with url name restrictions off, so we can create the unsafe # named project self.config_fixture.config(group='resource', project_name_url_safe='off') unsafe_name = 'i am not / safe' project = unit.new_project_ref(domain_id=test_v3.DEFAULT_DOMAIN_ID, name=unsafe_name) self.resource_api.create_project(project['id'], project) role_member = unit.new_role_ref() self.role_api.create_role(role_member['id'], role_member) self.assignment_api.add_role_to_user_and_project( self.user['id'], project['id'], role_member['id']) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_name=project['name'], project_domain_id=test_v3.DEFAULT_DOMAIN_ID) # Since name url restriction is off, we should be able to autenticate self.v3_create_token(auth_data) # Set the name url restriction to new, which should still allow us to # authenticate self.config_fixture.config(group='resource', project_name_url_safe='new') self.v3_create_token(auth_data) # Set the name url restriction to strict and we should fail to # authenticate self.config_fixture.config(group='resource', project_name_url_safe='strict') self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_authenticate_fails_if_domain_unsafe(self): """Verify authenticate to a domain with unsafe name fails.""" # Start with url name restrictions off, so we can create the unsafe # named domain self.config_fixture.config(group='resource', domain_name_url_safe='off') unsafe_name = 'i am not / safe' domain = 
unit.new_domain_ref(name=unsafe_name) self.resource_api.create_domain(domain['id'], domain) role_member = unit.new_role_ref() self.role_api.create_role(role_member['id'], role_member) self.assignment_api.create_grant( role_member['id'], user_id=self.user['id'], domain_id=domain['id']) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_name=domain['name']) # Since name url restriction is off, we should be able to autenticate self.v3_create_token(auth_data) # Set the name url restriction to new, which should still allow us to # authenticate self.config_fixture.config(group='resource', project_name_url_safe='new') self.v3_create_token(auth_data) # Set the name url restriction to strict and we should fail to # authenticate self.config_fixture.config(group='resource', domain_name_url_safe='strict') self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_authenticate_fails_to_project_if_domain_unsafe(self): """Verify authenticate to a project using unsafe domain name fails.""" # Start with url name restrictions off, so we can create the unsafe # named domain self.config_fixture.config(group='resource', domain_name_url_safe='off') unsafe_name = 'i am not / safe' domain = unit.new_domain_ref(name=unsafe_name) self.resource_api.create_domain(domain['id'], domain) # Add a (safely named) project to that domain project = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project['id'], project) role_member = unit.new_role_ref() self.role_api.create_role(role_member['id'], role_member) self.assignment_api.create_grant( role_member['id'], user_id=self.user['id'], project_id=project['id']) # An auth request via project ID, but specifying domain by name auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_name=project['name'], project_domain_name=domain['name']) # Since name url restriction is off, we should be 
able to autenticate self.v3_create_token(auth_data) # Set the name url restriction to new, which should still allow us to # authenticate self.config_fixture.config(group='resource', project_name_url_safe='new') self.v3_create_token(auth_data) # Set the name url restriction to strict and we should fail to # authenticate self.config_fixture.config(group='resource', domain_name_url_safe='strict') self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) class TestAuthJSONExternal(test_v3.RestfulTestCase): content_type = 'json' def auth_plugin_config_override(self, methods=None, **method_classes): self.config_fixture.config(group='auth', methods=[]) def test_remote_user_no_method(self): api = auth.controllers.Auth() context, auth_info, auth_context = self.build_external_auth_request( self.default_domain_user['name']) self.assertRaises(exception.Unauthorized, api.authenticate, context, auth_info, auth_context) class TestTrustOptional(test_v3.RestfulTestCase): def config_overrides(self): super(TestTrustOptional, self).config_overrides() self.config_fixture.config(group='trust', enabled=False) def test_trusts_returns_not_found(self): self.get('/OS-TRUST/trusts', body={'trust': {}}, expected_status=http_client.NOT_FOUND) self.post('/OS-TRUST/trusts', body={'trust': {}}, expected_status=http_client.NOT_FOUND) def test_auth_with_scope_in_trust_forbidden(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], trust_id=uuid.uuid4().hex) self.v3_create_token(auth_data, expected_status=http_client.FORBIDDEN) class TrustAPIBehavior(test_v3.RestfulTestCase): """Redelegation valid and secure Redelegation is a hierarchical structure of trusts between initial trustor and a group of users allowed to impersonate trustor and act in his name. Hierarchy is created in a process of trusting already trusted permissions and organized as an adjacency list using 'redelegated_trust_id' field. 
Redelegation is valid if each subsequent trust in a chain passes 'not more' permissions than being redelegated. Trust constraints are: * roles - set of roles trusted by trustor * expiration_time * allow_redelegation - a flag * redelegation_count - decreasing value restricting length of trust chain * remaining_uses - DISALLOWED when allow_redelegation == True Trust becomes invalid in case: * trust roles were revoked from trustor * one of the users in the delegation chain was disabled or deleted * expiration time passed * one of the parent trusts has become invalid * one of the parent trusts was deleted """ def config_overrides(self): super(TrustAPIBehavior, self).config_overrides() self.config_fixture.config( group='trust', enabled=True, allow_redelegation=True, max_redelegation_count=10 ) def setUp(self): super(TrustAPIBehavior, self).setUp() # Create a trustee to delegate stuff to self.trustee_user = unit.create_user(self.identity_api, domain_id=self.domain_id) # trustor->trustee self.redelegated_trust_ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id], allow_redelegation=True) # trustor->trustee (no redelegation) self.chained_trust_ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, role_ids=[self.role_id], allow_redelegation=True) def _get_trust_token(self, trust): trust_id = trust['id'] auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust_id) trust_token = self.get_requested_token(auth_data) return trust_token def test_depleted_redelegation_count_error(self): self.redelegated_trust_ref['redelegation_count'] = 0 r = self.post('/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}) trust = self.assertValidTrustResponse(r) trust_token = 
self._get_trust_token(trust) # Attempt to create a redelegated trust. self.post('/OS-TRUST/trusts', body={'trust': self.chained_trust_ref}, token=trust_token, expected_status=http_client.FORBIDDEN) def test_modified_redelegation_count_error(self): r = self.post('/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}) trust = self.assertValidTrustResponse(r) trust_token = self._get_trust_token(trust) # Attempt to create a redelegated trust with incorrect # redelegation_count. correct = trust['redelegation_count'] - 1 incorrect = correct - 1 self.chained_trust_ref['redelegation_count'] = incorrect self.post('/OS-TRUST/trusts', body={'trust': self.chained_trust_ref}, token=trust_token, expected_status=http_client.FORBIDDEN) def test_max_redelegation_count_constraint(self): incorrect = CONF.trust.max_redelegation_count + 1 self.redelegated_trust_ref['redelegation_count'] = incorrect self.post('/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}, expected_status=http_client.FORBIDDEN) def test_redelegation_expiry(self): r = self.post('/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}) trust = self.assertValidTrustResponse(r) trust_token = self._get_trust_token(trust) # Attempt to create a redelegated trust supposed to last longer # than the parent trust: let's give it 10 minutes (>1 minute). too_long_live_chained_trust_ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=10), role_ids=[self.role_id]) self.post('/OS-TRUST/trusts', body={'trust': too_long_live_chained_trust_ref}, token=trust_token, expected_status=http_client.FORBIDDEN) def test_redelegation_remaining_uses(self): r = self.post('/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}) trust = self.assertValidTrustResponse(r) trust_token = self._get_trust_token(trust) # Attempt to create a redelegated trust with remaining_uses defined. 
# It must fail according to specification: remaining_uses must be # omitted for trust redelegation. Any number here. self.chained_trust_ref['remaining_uses'] = 5 self.post('/OS-TRUST/trusts', body={'trust': self.chained_trust_ref}, token=trust_token, expected_status=http_client.BAD_REQUEST) def test_roles_subset(self): # Build second role role = unit.new_role_ref() self.role_api.create_role(role['id'], role) # assign a new role to the user self.assignment_api.create_grant(role_id=role['id'], user_id=self.user_id, project_id=self.project_id) # Create first trust with extended set of roles ref = self.redelegated_trust_ref ref['expires_at'] = datetime.datetime.utcnow().replace( year=2032).strftime(unit.TIME_FORMAT) ref['roles'].append({'id': role['id']}) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) # Trust created with exact set of roles (checked by role id) role_id_set = set(r['id'] for r in ref['roles']) trust_role_id_set = set(r['id'] for r in trust['roles']) self.assertEqual(role_id_set, trust_role_id_set) trust_token = self._get_trust_token(trust) # Chain second trust with roles subset self.chained_trust_ref['expires_at'] = ( datetime.datetime.utcnow().replace(year=2028).strftime( unit.TIME_FORMAT)) r = self.post('/OS-TRUST/trusts', body={'trust': self.chained_trust_ref}, token=trust_token) trust2 = self.assertValidTrustResponse(r) # First trust contains roles superset # Second trust contains roles subset role_id_set1 = set(r['id'] for r in trust['roles']) role_id_set2 = set(r['id'] for r in trust2['roles']) self.assertThat(role_id_set1, matchers.GreaterThan(role_id_set2)) def test_redelegate_with_role_by_name(self): # For role by name testing ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_names=[self.role['name']], allow_redelegation=True) ref['expires_at'] = 
datetime.datetime.utcnow().replace( year=2032).strftime(unit.TIME_FORMAT) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) # Ensure we can get a token with this trust trust_token = self._get_trust_token(trust) # Chain second trust with roles subset ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, role_names=[self.role['name']], allow_redelegation=True) ref['expires_at'] = datetime.datetime.utcnow().replace( year=2028).strftime(unit.TIME_FORMAT) r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=trust_token) trust = self.assertValidTrustResponse(r) # Ensure we can get a token with this trust self._get_trust_token(trust) def test_redelegate_new_role_fails(self): r = self.post('/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}) trust = self.assertValidTrustResponse(r) trust_token = self._get_trust_token(trust) # Build second trust with a role not in parent's roles role = unit.new_role_ref() self.role_api.create_role(role['id'], role) # assign a new role to the user self.assignment_api.create_grant(role_id=role['id'], user_id=self.user_id, project_id=self.project_id) # Try to chain a trust with the role not from parent trust self.chained_trust_ref['roles'] = [{'id': role['id']}] # Bypass policy enforcement with mock.patch.object(rules, 'enforce', return_value=True): self.post('/OS-TRUST/trusts', body={'trust': self.chained_trust_ref}, token=trust_token, expected_status=http_client.FORBIDDEN) def test_redelegation_terminator(self): self.redelegated_trust_ref['expires_at'] = ( datetime.datetime.utcnow().replace(year=2032).strftime( unit.TIME_FORMAT)) r = self.post('/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}) trust = self.assertValidTrustResponse(r) trust_token = self._get_trust_token(trust) # Build second trust - the terminator self.chained_trust_ref['expires_at'] = ( 
datetime.datetime.utcnow().replace(year=2028).strftime( unit.TIME_FORMAT)) ref = dict(self.chained_trust_ref, redelegation_count=1, allow_redelegation=False) r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=trust_token) trust = self.assertValidTrustResponse(r) # Check that allow_redelegation == False caused redelegation_count # to be set to 0, while allow_redelegation is removed self.assertNotIn('allow_redelegation', trust) self.assertEqual(0, trust['redelegation_count']) trust_token = self._get_trust_token(trust) # Build third trust, same as second self.post('/OS-TRUST/trusts', body={'trust': ref}, token=trust_token, expected_status=http_client.FORBIDDEN) def test_redelegation_without_impersonation(self): # Update trust to not allow impersonation self.redelegated_trust_ref['impersonation'] = False # Create trust resp = self.post('/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}, expected_status=http_client.CREATED) trust = self.assertValidTrustResponse(resp) # Get trusted token without impersonation auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) trust_token = self.get_requested_token(auth_data) # Create second user for redelegation trustee_user_2 = unit.create_user(self.identity_api, domain_id=self.domain_id) # Trust for redelegation trust_ref_2 = unit.new_trust_ref( trustor_user_id=self.trustee_user['id'], trustee_user_id=trustee_user_2['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], allow_redelegation=False) # Creating a second trust should not be allowed since trustor does not # have the role to delegate thus returning 404 NOT FOUND. 
resp = self.post('/OS-TRUST/trusts', body={'trust': trust_ref_2}, token=trust_token, expected_status=http_client.NOT_FOUND) def test_create_unscoped_trust(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id']) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) self.assertValidTrustResponse(r, ref) def test_create_trust_no_roles(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id) self.post('/OS-TRUST/trusts', body={'trust': ref}, expected_status=http_client.FORBIDDEN) def _initialize_test_consume_trust(self, count): # Make sure remaining_uses is decremented as we consume the trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, remaining_uses=count, role_ids=[self.role_id]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) # make sure the trust exists trust = self.assertValidTrustResponse(r, ref) r = self.get( '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']}) # get a token for the trustee auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password']) r = self.v3_create_token(auth_data) token = r.headers.get('X-Subject-Token') # get a trust token, consume one use auth_data = self.build_authentication_request( token=token, trust_id=trust['id']) r = self.v3_create_token(auth_data) return trust def test_consume_trust_once(self): trust = self._initialize_test_consume_trust(2) # check decremented value r = self.get( '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']}) trust = r.result.get('trust') self.assertIsNotNone(trust) self.assertEqual(1, trust['remaining_uses']) # FIXME(lbragstad): Assert the role that is returned is the right role. 
def test_create_one_time_use_trust(self): trust = self._initialize_test_consume_trust(1) # No more uses, the trust is made unavailable self.get( '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']}, expected_status=http_client.NOT_FOUND) # this time we can't get a trust token auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_create_unlimited_use_trust(self): # by default trusts are unlimited in terms of tokens that can be # generated from them, this test creates such a trust explicitly ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, remaining_uses=None, role_ids=[self.role_id]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r, ref) r = self.get( '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']}) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password']) r = self.v3_create_token(auth_data) token = r.headers.get('X-Subject-Token') auth_data = self.build_authentication_request( token=token, trust_id=trust['id']) r = self.v3_create_token(auth_data) r = self.get( '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']}) trust = r.result.get('trust') self.assertIsNone(trust['remaining_uses']) def test_impersonation_token_cannot_create_new_trust(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) trust_token = 
self.get_requested_token(auth_data) # Build second trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id]) self.post('/OS-TRUST/trusts', body={'trust': ref}, token=trust_token, expected_status=http_client.FORBIDDEN) def test_trust_deleted_grant(self): # create a new role role = unit.new_role_ref() self.role_api.create_role(role['id'], role) grant_url = ( '/projects/%(project_id)s/users/%(user_id)s/' 'roles/%(role_id)s' % { 'project_id': self.project_id, 'user_id': self.user_id, 'role_id': role['id']}) # assign a new role self.put(grant_url) # create a trust that delegates the new role ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[role['id']]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) # delete the grant self.delete(grant_url) # attempt to get a trust token with the deleted grant # and ensure it's unauthorized auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) r = self.v3_create_token(auth_data, expected_status=http_client.FORBIDDEN) def test_trust_chained(self): """Test that a trust token can't be used to execute another trust. To do this, we create an A->B->C hierarchy of trusts, then attempt to execute the trusts in series (C->B->A). 
""" # create a sub-trustee user sub_trustee_user = unit.create_user( self.identity_api, domain_id=test_v3.DEFAULT_DOMAIN_ID) sub_trustee_user_id = sub_trustee_user['id'] # create a new role role = unit.new_role_ref() self.role_api.create_role(role['id'], role) # assign the new role to trustee self.put( '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % { 'project_id': self.project_id, 'user_id': self.trustee_user['id'], 'role_id': role['id']}) # create a trust from trustor -> trustee ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust1 = self.assertValidTrustResponse(r) # authenticate as trustee so we can create a second trust auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], project_id=self.project_id) token = self.get_requested_token(auth_data) # create a trust from trustee -> sub-trustee ref = unit.new_trust_ref( trustor_user_id=self.trustee_user['id'], trustee_user_id=sub_trustee_user_id, project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[role['id']]) r = self.post('/OS-TRUST/trusts', token=token, body={'trust': ref}) trust2 = self.assertValidTrustResponse(r) # authenticate as sub-trustee and get a trust token auth_data = self.build_authentication_request( user_id=sub_trustee_user['id'], password=sub_trustee_user['password'], trust_id=trust2['id']) trust_token = self.get_requested_token(auth_data) # attempt to get the second trust using a trust token auth_data = self.build_authentication_request( token=trust_token, trust_id=trust1['id']) r = self.v3_create_token(auth_data, expected_status=http_client.FORBIDDEN) def assertTrustTokensRevoked(self, trust_id): revocation_response = self.get('/OS-REVOKE/events') revocation_events = 
revocation_response.json_body['events'] found = False for event in revocation_events: if event.get('OS-TRUST:trust_id') == trust_id: found = True self.assertTrue(found, 'event with trust_id %s not found in list' % trust_id) def test_delete_trust_revokes_tokens(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) trust_id = trust['id'] auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust_id) r = self.v3_create_token(auth_data) self.assertValidProjectScopedTokenResponse( r, self.trustee_user) trust_token = r.headers['X-Subject-Token'] self.delete('/OS-TRUST/trusts/%(trust_id)s' % { 'trust_id': trust_id}) headers = {'X-Subject-Token': trust_token} self.head('/auth/tokens', headers=headers, expected_status=http_client.NOT_FOUND) self.assertTrustTokensRevoked(trust_id) def disable_user(self, user): user['enabled'] = False self.identity_api.update_user(user['id'], user) def test_trust_get_token_fails_if_trustor_disabled(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r, ref) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) self.v3_create_token(auth_data) self.disable_user(self.user) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) self.v3_create_token(auth_data, expected_status=http_client.FORBIDDEN) def 
test_trust_get_token_fails_if_trustee_disabled(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r, ref) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) self.v3_create_token(auth_data) self.disable_user(self.trustee_user) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_delete_trust(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r, ref) self.delete('/OS-TRUST/trusts/%(trust_id)s' % { 'trust_id': trust['id']}) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_change_password_invalidates_trust_tokens(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) r = self.v3_create_token(auth_data) self.assertValidProjectScopedTokenResponse(r, self.user) trust_token = 
r.headers.get('X-Subject-Token') self.get('/OS-TRUST/trusts?trustor_user_id=%s' % self.user_id, token=trust_token) self.assertValidUserResponse( self.patch('/users/%s' % self.trustee_user['id'], body={'user': {'password': uuid.uuid4().hex}})) self.get('/OS-TRUST/trusts?trustor_user_id=%s' % self.user_id, expected_status=http_client.UNAUTHORIZED, token=trust_token) def test_trustee_can_do_role_ops(self): resp = self.post('/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}) trust = self.assertValidTrustResponse(resp) trust_token = self._get_trust_token(trust) resp = self.get( '/OS-TRUST/trusts/%(trust_id)s/roles' % { 'trust_id': trust['id']}, token=trust_token) self.assertValidRoleListResponse(resp, self.role) self.head( '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % { 'trust_id': trust['id'], 'role_id': self.role['id']}, token=trust_token, expected_status=http_client.OK) resp = self.get( '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % { 'trust_id': trust['id'], 'role_id': self.role['id']}, token=trust_token) self.assertValidRoleResponse(resp, self.role) def test_do_not_consume_remaining_uses_when_get_token_fails(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], remaining_uses=3) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) new_trust = r.result.get('trust') trust_id = new_trust.get('id') # Pass in another user's ID as the trustee, the result being a failed # token authenticate and the remaining_uses of the trust should not be # decremented. 
auth_data = self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], trust_id=trust_id) self.v3_create_token(auth_data, expected_status=http_client.FORBIDDEN) r = self.get('/OS-TRUST/trusts/%s' % trust_id) self.assertEqual(3, r.result.get('trust').get('remaining_uses')) class TestTrustChain(test_v3.RestfulTestCase): def config_overrides(self): super(TestTrustChain, self).config_overrides() self.config_fixture.config( group='trust', enabled=True, allow_redelegation=True, max_redelegation_count=10 ) def setUp(self): super(TestTrustChain, self).setUp() """Create a trust chain using redelegation. A trust chain is a series of trusts that are redelegated. For example, self.user_list consists of userA, userB, and userC. The first trust in the trust chain is going to be established between self.user and userA, call it trustA. Then, userA is going to obtain a trust scoped token using trustA, and with that token create a trust between userA and userB called trustB. This pattern will continue with userB creating a trust with userC. 
So the trust chain should look something like: trustA -> trustB -> trustC Where: self.user is trusting userA with trustA userA is trusting userB with trustB userB is trusting userC with trustC """ self.user_list = list() self.trust_chain = list() for _ in range(3): user = unit.create_user(self.identity_api, domain_id=self.domain_id) self.user_list.append(user) # trustor->trustee redelegation with impersonation trustee = self.user_list[0] trust_ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=trustee['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id], allow_redelegation=True, redelegation_count=3) # Create a trust between self.user and the first user in the list r = self.post('/OS-TRUST/trusts', body={'trust': trust_ref}) trust = self.assertValidTrustResponse(r) auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'], trust_id=trust['id']) # Generate a trusted token for the first user trust_token = self.get_requested_token(auth_data) self.trust_chain.append(trust) # Loop through the user to create a chain of redelegated trust. 
for next_trustee in self.user_list[1:]: trust_ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=next_trustee['id'], project_id=self.project_id, impersonation=True, role_ids=[self.role_id], allow_redelegation=True) r = self.post('/OS-TRUST/trusts', body={'trust': trust_ref}, token=trust_token) trust = self.assertValidTrustResponse(r) auth_data = self.build_authentication_request( user_id=next_trustee['id'], password=next_trustee['password'], trust_id=trust['id']) trust_token = self.get_requested_token(auth_data) self.trust_chain.append(trust) trustee = self.user_list[-1] trust = self.trust_chain[-1] auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'], trust_id=trust['id']) self.last_token = self.get_requested_token(auth_data) def assert_user_authenticate(self, user): auth_data = self.build_authentication_request( user_id=user['id'], password=user['password'] ) r = self.v3_create_token(auth_data) self.assertValidTokenResponse(r) def assert_trust_tokens_revoked(self, trust_id): trustee = self.user_list[0] auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'] ) r = self.v3_create_token(auth_data) self.assertValidTokenResponse(r) revocation_response = self.get('/OS-REVOKE/events') revocation_events = revocation_response.json_body['events'] found = False for event in revocation_events: if event.get('OS-TRUST:trust_id') == trust_id: found = True self.assertTrue(found, 'event with trust_id %s not found in list' % trust_id) def test_delete_trust_cascade(self): self.assert_user_authenticate(self.user_list[0]) self.delete('/OS-TRUST/trusts/%(trust_id)s' % { 'trust_id': self.trust_chain[0]['id']}) headers = {'X-Subject-Token': self.last_token} self.head('/auth/tokens', headers=headers, expected_status=http_client.NOT_FOUND) self.assert_trust_tokens_revoked(self.trust_chain[0]['id']) def test_delete_broken_chain(self): 
self.assert_user_authenticate(self.user_list[0]) self.delete('/OS-TRUST/trusts/%(trust_id)s' % { 'trust_id': self.trust_chain[0]['id']}) # Verify the two remaining trust have been deleted for i in range(len(self.user_list) - 1): auth_data = self.build_authentication_request( user_id=self.user_list[i]['id'], password=self.user_list[i]['password']) auth_token = self.get_requested_token(auth_data) # Assert chained trust have been deleted self.get('/OS-TRUST/trusts/%(trust_id)s' % { 'trust_id': self.trust_chain[i + 1]['id']}, token=auth_token, expected_status=http_client.NOT_FOUND) def test_trustor_roles_revoked(self): self.assert_user_authenticate(self.user_list[0]) self.assignment_api.remove_role_from_user_and_project( self.user_id, self.project_id, self.role_id ) # Verify that users are not allowed to authenticate with trust for i in range(len(self.user_list[1:])): trustee = self.user_list[i] auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password']) # Attempt to authenticate with trust token = self.get_requested_token(auth_data) auth_data = self.build_authentication_request( token=token, trust_id=self.trust_chain[i - 1]['id']) # Trustee has no delegated roles self.v3_create_token(auth_data, expected_status=http_client.FORBIDDEN) def test_intermediate_user_disabled(self): self.assert_user_authenticate(self.user_list[0]) disabled = self.user_list[0] disabled['enabled'] = False self.identity_api.update_user(disabled['id'], disabled) # Bypass policy enforcement with mock.patch.object(rules, 'enforce', return_value=True): headers = {'X-Subject-Token': self.last_token} self.head('/auth/tokens', headers=headers, expected_status=http_client.FORBIDDEN) def test_intermediate_user_deleted(self): self.assert_user_authenticate(self.user_list[0]) self.identity_api.delete_user(self.user_list[0]['id']) # Bypass policy enforcement with mock.patch.object(rules, 'enforce', return_value=True): headers = {'X-Subject-Token': self.last_token} 
self.head('/auth/tokens', headers=headers, expected_status=http_client.FORBIDDEN) class TestAPIProtectionWithoutAuthContextMiddleware(test_v3.RestfulTestCase): def test_api_protection_with_no_auth_context_in_env(self): auth_data = self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], project_id=self.project['id']) token = self.get_requested_token(auth_data) auth_controller = auth.controllers.Auth() # all we care is that auth context is not in the environment and # 'token_id' is used to build the auth context instead context = {'subject_token_id': token, 'token_id': token, 'query_string': {}, 'environment': {}} r = auth_controller.validate_token(context) self.assertEqual(http_client.OK, r.status_code) class TestAuthContext(unit.TestCase): def setUp(self): super(TestAuthContext, self).setUp() self.auth_context = auth.controllers.AuthContext() def test_pick_lowest_expires_at(self): expires_at_1 = utils.isotime(timeutils.utcnow()) expires_at_2 = utils.isotime(timeutils.utcnow() + datetime.timedelta(seconds=10)) # make sure auth_context picks the lowest value self.auth_context['expires_at'] = expires_at_1 self.auth_context['expires_at'] = expires_at_2 self.assertEqual(expires_at_1, self.auth_context['expires_at']) def test_identity_attribute_conflict(self): for identity_attr in auth.controllers.AuthContext.IDENTITY_ATTRIBUTES: self.auth_context[identity_attr] = uuid.uuid4().hex if identity_attr == 'expires_at': # 'expires_at' is a special case. Will test it in a separate # test case. continue self.assertRaises(exception.Unauthorized, operator.setitem, self.auth_context, identity_attr, uuid.uuid4().hex) def test_identity_attribute_conflict_with_none_value(self): for identity_attr in auth.controllers.AuthContext.IDENTITY_ATTRIBUTES: self.auth_context[identity_attr] = None if identity_attr == 'expires_at': # 'expires_at' is a special case and is tested above. 
self.auth_context['expires_at'] = uuid.uuid4().hex continue self.assertRaises(exception.Unauthorized, operator.setitem, self.auth_context, identity_attr, uuid.uuid4().hex) def test_non_identity_attribute_conflict_override(self): # for attributes Keystone doesn't know about, make sure they can be # freely manipulated attr_name = uuid.uuid4().hex attr_val_1 = uuid.uuid4().hex attr_val_2 = uuid.uuid4().hex self.auth_context[attr_name] = attr_val_1 self.auth_context[attr_name] = attr_val_2 self.assertEqual(attr_val_2, self.auth_context[attr_name]) class TestAuthSpecificData(test_v3.RestfulTestCase): def test_get_catalog_project_scoped_token(self): """Call ``GET /auth/catalog`` with a project-scoped token.""" r = self.get('/auth/catalog') self.assertValidCatalogResponse(r) def test_get_catalog_domain_scoped_token(self): """Call ``GET /auth/catalog`` with a domain-scoped token.""" # grant a domain role to a user self.put(path='/domains/%s/users/%s/roles/%s' % ( self.domain['id'], self.user['id'], self.role['id'])) self.get( '/auth/catalog', auth=self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain['id']), expected_status=http_client.FORBIDDEN) def test_get_catalog_unscoped_token(self): """Call ``GET /auth/catalog`` with an unscoped token.""" self.get( '/auth/catalog', auth=self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password']), expected_status=http_client.FORBIDDEN) def test_get_catalog_no_token(self): """Call ``GET /auth/catalog`` without a token.""" self.get( '/auth/catalog', noauth=True, expected_status=http_client.UNAUTHORIZED) def test_get_projects_project_scoped_token(self): r = self.get('/auth/projects') self.assertThat(r.json['projects'], matchers.HasLength(1)) self.assertValidProjectListResponse(r) def test_get_domains_project_scoped_token(self): self.put(path='/domains/%s/users/%s/roles/%s' % ( self.domain['id'], self.user['id'], 
self.role['id'])) r = self.get('/auth/domains') self.assertThat(r.json['domains'], matchers.HasLength(1)) self.assertValidDomainListResponse(r) class TestTrustAuthPKITokenProvider(TrustAPIBehavior, TestTrustChain): def config_overrides(self): super(TestTrustAuthPKITokenProvider, self).config_overrides() self.config_fixture.config(group='token', provider='pki', revoke_by_id=False) self.config_fixture.config(group='trust', enabled=True) class TestTrustAuthPKIZTokenProvider(TrustAPIBehavior, TestTrustChain): def config_overrides(self): super(TestTrustAuthPKIZTokenProvider, self).config_overrides() self.config_fixture.config(group='token', provider='pkiz', revoke_by_id=False) self.config_fixture.config(group='trust', enabled=True) class TestTrustAuthFernetTokenProvider(TrustAPIBehavior, TestTrustChain): def config_overrides(self): super(TestTrustAuthFernetTokenProvider, self).config_overrides() self.config_fixture.config(group='token', provider='fernet', revoke_by_id=False) self.config_fixture.config(group='trust', enabled=True) self.useFixture(ksfixtures.KeyRepository(self.config_fixture)) class TestAuthFernetTokenProvider(TestAuth): def setUp(self): super(TestAuthFernetTokenProvider, self).setUp() self.useFixture(ksfixtures.KeyRepository(self.config_fixture)) def config_overrides(self): super(TestAuthFernetTokenProvider, self).config_overrides() self.config_fixture.config(group='token', provider='fernet') def test_verify_with_bound_token(self): self.config_fixture.config(group='token', bind='kerberos') auth_data = self.build_authentication_request( project_id=self.project['id']) remote_user = self.default_domain_user['name'] self.admin_app.extra_environ.update({'REMOTE_USER': remote_user, 'AUTH_TYPE': 'Negotiate'}) # Bind not current supported by Fernet, see bug 1433311. 
self.v3_create_token(auth_data, expected_status=http_client.NOT_IMPLEMENTED) def test_v2_v3_bind_token_intermix(self): self.config_fixture.config(group='token', bind='kerberos') # we need our own user registered to the default domain because of # the way external auth works. remote_user = self.default_domain_user['name'] self.admin_app.extra_environ.update({'REMOTE_USER': remote_user, 'AUTH_TYPE': 'Negotiate'}) body = {'auth': {}} # Bind not current supported by Fernet, see bug 1433311. self.admin_request(path='/v2.0/tokens', method='POST', body=body, expected_status=http_client.NOT_IMPLEMENTED) def test_auth_with_bind_token(self): self.config_fixture.config(group='token', bind=['kerberos']) auth_data = self.build_authentication_request() remote_user = self.default_domain_user['name'] self.admin_app.extra_environ.update({'REMOTE_USER': remote_user, 'AUTH_TYPE': 'Negotiate'}) # Bind not current supported by Fernet, see bug 1433311. self.v3_create_token(auth_data, expected_status=http_client.NOT_IMPLEMENTED) class TestAuthTOTP(test_v3.RestfulTestCase): def setUp(self): super(TestAuthTOTP, self).setUp() ref = unit.new_totp_credential( user_id=self.default_domain_user['id'], project_id=self.default_domain_project['id']) self.secret = ref['blob'] r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) self.addCleanup(self.cleanup) def auth_plugin_config_override(self): methods = ['totp', 'token', 'password'] super(TestAuthTOTP, self).auth_plugin_config_override(methods) def _make_credentials(self, cred_type, count=1, user_id=None, project_id=None, blob=None): user_id = user_id or self.default_domain_user['id'] project_id = project_id or self.default_domain_project['id'] creds = [] for __ in range(count): if cred_type == 'totp': ref = unit.new_totp_credential( user_id=user_id, project_id=project_id, blob=blob) else: ref = unit.new_credential_ref( user_id=user_id, project_id=project_id) resp = self.post('/credentials', 
body={'credential': ref}) creds.append(resp.json['credential']) return creds def _make_auth_data_by_id(self, passcode, user_id=None): return self.build_authentication_request( user_id=user_id or self.default_domain_user['id'], passcode=passcode, project_id=self.project['id']) def _make_auth_data_by_name(self, passcode, username, user_domain_id): return self.build_authentication_request( username=username, user_domain_id=user_domain_id, passcode=passcode, project_id=self.project['id']) def cleanup(self): totp_creds = self.credential_api.list_credentials_for_user( self.default_domain_user['id'], type='totp') other_creds = self.credential_api.list_credentials_for_user( self.default_domain_user['id'], type='other') for cred in itertools.chain(other_creds, totp_creds): self.delete('/credentials/%s' % cred['id'], expected_status=http_client.NO_CONTENT) def test_with_a_valid_passcode(self): creds = self._make_credentials('totp') secret = creds[-1]['blob'] auth_data = self._make_auth_data_by_id( totp._generate_totp_passcode(secret)) self.v3_create_token(auth_data, expected_status=http_client.CREATED) def test_with_an_invalid_passcode_and_user_credentials(self): self._make_credentials('totp') auth_data = self._make_auth_data_by_id('000000') self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_with_an_invalid_passcode_with_no_user_credentials(self): auth_data = self._make_auth_data_by_id('000000') self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_with_a_corrupt_totp_credential(self): self._make_credentials('totp', count=1, blob='0') auth_data = self._make_auth_data_by_id('000000') self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_with_multiple_credentials(self): self._make_credentials('other', 3) creds = self._make_credentials('totp', count=3) secret = creds[-1]['blob'] auth_data = self._make_auth_data_by_id( totp._generate_totp_passcode(secret)) self.v3_create_token(auth_data, 
expected_status=http_client.CREATED) def test_with_multiple_users(self): # make some credentials for the existing user self._make_credentials('totp', count=3) # create a new user and their credentials user = unit.create_user(self.identity_api, domain_id=self.domain_id) self.assignment_api.create_grant(self.role['id'], user_id=user['id'], project_id=self.project['id']) creds = self._make_credentials('totp', count=1, user_id=user['id']) secret = creds[-1]['blob'] auth_data = self._make_auth_data_by_id( totp._generate_totp_passcode(secret), user_id=user['id']) self.v3_create_token(auth_data, expected_status=http_client.CREATED) def test_with_multiple_users_and_invalid_credentials(self): """Prevent logging in with someone else's credentials. It's very easy to forget to limit the credentials query by user. Let's just test it for a sanity check. """ # make some credentials for the existing user self._make_credentials('totp', count=3) # create a new user and their credentials new_user = unit.create_user(self.identity_api, domain_id=self.domain_id) self.assignment_api.create_grant(self.role['id'], user_id=new_user['id'], project_id=self.project['id']) user2_creds = self._make_credentials( 'totp', count=1, user_id=new_user['id']) user_id = self.default_domain_user['id'] # user1 secret = user2_creds[-1]['blob'] auth_data = self._make_auth_data_by_id( totp._generate_totp_passcode(secret), user_id=user_id) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_with_username_and_domain_id(self): creds = self._make_credentials('totp') secret = creds[-1]['blob'] auth_data = self._make_auth_data_by_name( totp._generate_totp_passcode(secret), username=self.default_domain_user['name'], user_domain_id=self.default_domain_user['domain_id']) self.v3_create_token(auth_data, expected_status=http_client.CREATED) class TestFetchRevocationList(test_v3.RestfulTestCase): """Test fetch token revocation list on the v3 Identity API.""" def config_overrides(self): 
super(TestFetchRevocationList, self).config_overrides() self.config_fixture.config(group='token', revoke_by_id=True) def test_ids_no_tokens(self): # When there's no revoked tokens the response is an empty list, and # the response is signed. res = self.get('/auth/tokens/OS-PKI/revoked') signed = res.json['signed'] clear = cms.cms_verify(signed, CONF.signing.certfile, CONF.signing.ca_certs) payload = json.loads(clear) self.assertEqual({'revoked': []}, payload) def test_ids_token(self): # When there's a revoked token, it's in the response, and the response # is signed. token_res = self.v3_create_token( self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])) token_id = token_res.headers.get('X-Subject-Token') token_data = token_res.json['token'] self.delete('/auth/tokens', headers={'X-Subject-Token': token_id}) res = self.get('/auth/tokens/OS-PKI/revoked') signed = res.json['signed'] clear = cms.cms_verify(signed, CONF.signing.certfile, CONF.signing.ca_certs) payload = json.loads(clear) def truncate(ts_str): return ts_str[:19] + 'Z' # 2016-01-21T15:53:52 == 19 chars. exp_token_revoke_data = { 'id': token_id, 'audit_id': token_data['audit_ids'][0], 'expires': truncate(token_data['expires_at']), } self.assertEqual({'revoked': [exp_token_revoke_data]}, payload) def test_audit_id_only_no_tokens(self): # When there's no revoked tokens and ?audit_id_only is used, the # response is an empty list and is not signed. res = self.get('/auth/tokens/OS-PKI/revoked?audit_id_only') self.assertEqual({'revoked': []}, res.json) def test_audit_id_only_token(self): # When there's a revoked token and ?audit_id_only is used, the # response contains the audit_id of the token and is not signed. 
token_res = self.v3_create_token( self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])) token_id = token_res.headers.get('X-Subject-Token') token_data = token_res.json['token'] self.delete('/auth/tokens', headers={'X-Subject-Token': token_id}) res = self.get('/auth/tokens/OS-PKI/revoked?audit_id_only') def truncate(ts_str): return ts_str[:19] + 'Z' # 2016-01-21T15:53:52 == 19 chars. exp_token_revoke_data = { 'audit_id': token_data['audit_ids'][0], 'expires': truncate(token_data['expires_at']), } self.assertEqual({'revoked': [exp_token_revoke_data]}, res.json) keystone-9.0.0/keystone/tests/unit/test_cli.py0000664000567000056710000004662412701407102022672 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import uuid import fixtures import mock from oslo_config import cfg from six.moves import range from testtools import matchers from keystone.cmd import cli from keystone.common import dependency from keystone.i18n import _ from keystone import resource from keystone.tests import unit from keystone.tests.unit.ksfixtures import database CONF = cfg.CONF class CliTestCase(unit.SQLDriverOverrides, unit.TestCase): def config_files(self): config_files = super(CliTestCase, self).config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def test_token_flush(self): self.useFixture(database.Database()) self.load_backends() cli.TokenFlush.main() class CliBootStrapTestCase(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): self.useFixture(database.Database()) super(CliBootStrapTestCase, self).setUp() def config_files(self): self.config_fixture.register_cli_opt(cli.command_opt) config_files = super(CliBootStrapTestCase, self).config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def config(self, config_files): CONF(args=['bootstrap', '--bootstrap-password', uuid.uuid4().hex], project='keystone', default_config_files=config_files) def test_bootstrap(self): bootstrap = cli.BootStrap() self._do_test_bootstrap(bootstrap) def _do_test_bootstrap(self, bootstrap): bootstrap.do_bootstrap() project = bootstrap.resource_manager.get_project_by_name( bootstrap.project_name, 'default') user = bootstrap.identity_manager.get_user_by_name( bootstrap.username, 'default') role = bootstrap.role_manager.get_role(bootstrap.role_id) role_list = ( bootstrap.assignment_manager.get_roles_for_user_and_project( user['id'], project['id'])) self.assertIs(len(role_list), 1) self.assertEqual(role_list[0], role['id']) # NOTE(morganfainberg): Pass an empty context, it isn't used by # `authenticate` method. 
bootstrap.identity_manager.authenticate( {}, user['id'], bootstrap.password) if bootstrap.region_id: region = bootstrap.catalog_manager.get_region(bootstrap.region_id) self.assertEqual(self.region_id, region['id']) if bootstrap.service_id: svc = bootstrap.catalog_manager.get_service(bootstrap.service_id) self.assertEqual(self.service_name, svc['name']) self.assertEqual(set(['admin', 'public', 'internal']), set(bootstrap.endpoints)) urls = {'public': self.public_url, 'internal': self.internal_url, 'admin': self.admin_url} for interface, url in urls.items(): endpoint_id = bootstrap.endpoints[interface] endpoint = bootstrap.catalog_manager.get_endpoint(endpoint_id) self.assertEqual(self.region_id, endpoint['region_id']) self.assertEqual(url, endpoint['url']) self.assertEqual(svc['id'], endpoint['service_id']) self.assertEqual(interface, endpoint['interface']) def test_bootstrap_is_idempotent(self): # NOTE(morganfainberg): Ensure we can run bootstrap multiple times # without erroring. bootstrap = cli.BootStrap() self._do_test_bootstrap(bootstrap) self._do_test_bootstrap(bootstrap) class CliBootStrapTestCaseWithEnvironment(CliBootStrapTestCase): def config(self, config_files): CONF(args=['bootstrap'], project='keystone', default_config_files=config_files) def setUp(self): super(CliBootStrapTestCaseWithEnvironment, self).setUp() self.password = uuid.uuid4().hex self.username = uuid.uuid4().hex self.project_name = uuid.uuid4().hex self.role_name = uuid.uuid4().hex self.service_name = uuid.uuid4().hex self.public_url = uuid.uuid4().hex self.internal_url = uuid.uuid4().hex self.admin_url = uuid.uuid4().hex self.region_id = uuid.uuid4().hex self.default_domain = { 'id': CONF.identity.default_domain_id, 'name': 'Default', } self.useFixture( fixtures.EnvironmentVariable('OS_BOOTSTRAP_PASSWORD', newvalue=self.password)) self.useFixture( fixtures.EnvironmentVariable('OS_BOOTSTRAP_USERNAME', newvalue=self.username)) self.useFixture( 
fixtures.EnvironmentVariable('OS_BOOTSTRAP_PROJECT_NAME', newvalue=self.project_name)) self.useFixture( fixtures.EnvironmentVariable('OS_BOOTSTRAP_ROLE_NAME', newvalue=self.role_name)) self.useFixture( fixtures.EnvironmentVariable('OS_BOOTSTRAP_SERVICE_NAME', newvalue=self.service_name)) self.useFixture( fixtures.EnvironmentVariable('OS_BOOTSTRAP_PUBLIC_URL', newvalue=self.public_url)) self.useFixture( fixtures.EnvironmentVariable('OS_BOOTSTRAP_INTERNAL_URL', newvalue=self.internal_url)) self.useFixture( fixtures.EnvironmentVariable('OS_BOOTSTRAP_ADMIN_URL', newvalue=self.admin_url)) self.useFixture( fixtures.EnvironmentVariable('OS_BOOTSTRAP_REGION_ID', newvalue=self.region_id)) def test_assignment_created_with_user_exists(self): # test assignment can be created if user already exists. bootstrap = cli.BootStrap() bootstrap.resource_manager.create_domain(self.default_domain['id'], self.default_domain) user_ref = unit.new_user_ref(self.default_domain['id'], name=self.username, password=self.password) bootstrap.identity_manager.create_user(user_ref) self._do_test_bootstrap(bootstrap) def test_assignment_created_with_project_exists(self): # test assignment can be created if project already exists. bootstrap = cli.BootStrap() bootstrap.resource_manager.create_domain(self.default_domain['id'], self.default_domain) project_ref = unit.new_project_ref(self.default_domain['id'], name=self.project_name) bootstrap.resource_manager.create_project(project_ref['id'], project_ref) self._do_test_bootstrap(bootstrap) def test_assignment_created_with_role_exists(self): # test assignment can be created if role already exists. bootstrap = cli.BootStrap() bootstrap.resource_manager.create_domain(self.default_domain['id'], self.default_domain) role = unit.new_role_ref(name=self.role_name) bootstrap.role_manager.create_role(role['id'], role) self._do_test_bootstrap(bootstrap) def test_assignment_created_with_region_exists(self): # test assignment can be created if role already exists. 
bootstrap = cli.BootStrap() bootstrap.resource_manager.create_domain(self.default_domain['id'], self.default_domain) region = unit.new_region_ref(id=self.region_id) bootstrap.catalog_manager.create_region(region) self._do_test_bootstrap(bootstrap) def test_endpoints_created_with_service_exists(self): # test assignment can be created if role already exists. bootstrap = cli.BootStrap() bootstrap.resource_manager.create_domain(self.default_domain['id'], self.default_domain) service = unit.new_service_ref(name=self.service_name) bootstrap.catalog_manager.create_service(service['id'], service) self._do_test_bootstrap(bootstrap) def test_endpoints_created_with_endpoint_exists(self): # test assignment can be created if role already exists. bootstrap = cli.BootStrap() bootstrap.resource_manager.create_domain(self.default_domain['id'], self.default_domain) service = unit.new_service_ref(name=self.service_name) bootstrap.catalog_manager.create_service(service['id'], service) region = unit.new_region_ref(id=self.region_id) bootstrap.catalog_manager.create_region(region) endpoint = unit.new_endpoint_ref(interface='public', service_id=service['id'], url=self.public_url, region_id=self.region_id) bootstrap.catalog_manager.create_endpoint(endpoint['id'], endpoint) self._do_test_bootstrap(bootstrap) class CliDomainConfigAllTestCase(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): self.useFixture(database.Database()) super(CliDomainConfigAllTestCase, self).setUp() self.load_backends() self.config_fixture.config( group='identity', domain_config_dir=unit.TESTCONF + '/domain_configs_multi_ldap') self.domain_count = 3 self.setup_initial_domains() def config_files(self): self.config_fixture.register_cli_opt(cli.command_opt) self.addCleanup(self.cleanup) config_files = super(CliDomainConfigAllTestCase, self).config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def cleanup(self): CONF.reset() CONF.unregister_opt(cli.command_opt) def 
cleanup_domains(self): for domain in self.domains: if domain == 'domain_default': # Not allowed to delete the default domain, but should at least # delete any domain-specific config for it. self.domain_config_api.delete_config( CONF.identity.default_domain_id) continue this_domain = self.domains[domain] this_domain['enabled'] = False self.resource_api.update_domain(this_domain['id'], this_domain) self.resource_api.delete_domain(this_domain['id']) self.domains = {} def config(self, config_files): CONF(args=['domain_config_upload', '--all'], project='keystone', default_config_files=config_files) def setup_initial_domains(self): def create_domain(domain): return self.resource_api.create_domain(domain['id'], domain) self.domains = {} self.addCleanup(self.cleanup_domains) for x in range(1, self.domain_count): domain = 'domain%s' % x self.domains[domain] = create_domain( {'id': uuid.uuid4().hex, 'name': domain}) self.domains['domain_default'] = create_domain( resource.calc_default_domain()) def test_config_upload(self): # The values below are the same as in the domain_configs_multi_ldap # directory of test config_files. 
default_config = { 'ldap': {'url': 'fake://memory', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=example,cn=com'}, 'identity': {'driver': 'ldap'} } domain1_config = { 'ldap': {'url': 'fake://memory1', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=example,cn=com'}, 'identity': {'driver': 'ldap', 'list_limit': '101'} } domain2_config = { 'ldap': {'url': 'fake://memory', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=myroot,cn=com', 'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org', 'user_tree_dn': 'ou=Users,dc=myroot,dc=org'}, 'identity': {'driver': 'ldap'} } # Clear backend dependencies, since cli loads these manually dependency.reset() cli.DomainConfigUpload.main() res = self.domain_config_api.get_config_with_sensitive_info( CONF.identity.default_domain_id) self.assertEqual(default_config, res) res = self.domain_config_api.get_config_with_sensitive_info( self.domains['domain1']['id']) self.assertEqual(domain1_config, res) res = self.domain_config_api.get_config_with_sensitive_info( self.domains['domain2']['id']) self.assertEqual(domain2_config, res) class CliDomainConfigSingleDomainTestCase(CliDomainConfigAllTestCase): def config(self, config_files): CONF(args=['domain_config_upload', '--domain-name', 'Default'], project='keystone', default_config_files=config_files) def test_config_upload(self): # The values below are the same as in the domain_configs_multi_ldap # directory of test config_files. 
default_config = { 'ldap': {'url': 'fake://memory', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=example,cn=com'}, 'identity': {'driver': 'ldap'} } # Clear backend dependencies, since cli loads these manually dependency.reset() cli.DomainConfigUpload.main() res = self.domain_config_api.get_config_with_sensitive_info( CONF.identity.default_domain_id) self.assertEqual(default_config, res) res = self.domain_config_api.get_config_with_sensitive_info( self.domains['domain1']['id']) self.assertEqual({}, res) res = self.domain_config_api.get_config_with_sensitive_info( self.domains['domain2']['id']) self.assertEqual({}, res) def test_no_overwrite_config(self): # Create a config for the default domain default_config = { 'ldap': {'url': uuid.uuid4().hex}, 'identity': {'driver': 'ldap'} } self.domain_config_api.create_config( CONF.identity.default_domain_id, default_config) # Now try and upload the settings in the configuration file for the # default domain dependency.reset() with mock.patch('six.moves.builtins.print') as mock_print: self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main) file_name = ('keystone.%s.conf' % resource.calc_default_domain()['name']) error_msg = _( 'Domain: %(domain)s already has a configuration defined - ' 'ignoring file: %(file)s.') % { 'domain': resource.calc_default_domain()['name'], 'file': os.path.join(CONF.identity.domain_config_dir, file_name)} mock_print.assert_has_calls([mock.call(error_msg)]) res = self.domain_config_api.get_config( CONF.identity.default_domain_id) # The initial config should not have been overwritten self.assertEqual(default_config, res) class CliDomainConfigNoOptionsTestCase(CliDomainConfigAllTestCase): def config(self, config_files): CONF(args=['domain_config_upload'], project='keystone', default_config_files=config_files) def test_config_upload(self): dependency.reset() with mock.patch('six.moves.builtins.print') as mock_print: self.assertRaises(unit.UnexpectedExit, 
cli.DomainConfigUpload.main) mock_print.assert_has_calls( [mock.call( _('At least one option must be provided, use either ' '--all or --domain-name'))]) class CliDomainConfigTooManyOptionsTestCase(CliDomainConfigAllTestCase): def config(self, config_files): CONF(args=['domain_config_upload', '--all', '--domain-name', 'Default'], project='keystone', default_config_files=config_files) def test_config_upload(self): dependency.reset() with mock.patch('six.moves.builtins.print') as mock_print: self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main) mock_print.assert_has_calls( [mock.call(_('The --all option cannot be used with ' 'the --domain-name option'))]) class CliDomainConfigInvalidDomainTestCase(CliDomainConfigAllTestCase): def config(self, config_files): self.invalid_domain_name = uuid.uuid4().hex CONF(args=['domain_config_upload', '--domain-name', self.invalid_domain_name], project='keystone', default_config_files=config_files) def test_config_upload(self): dependency.reset() with mock.patch('six.moves.builtins.print') as mock_print: self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main) file_name = 'keystone.%s.conf' % self.invalid_domain_name error_msg = (_( 'Invalid domain name: %(domain)s found in config file name: ' '%(file)s - ignoring this file.') % { 'domain': self.invalid_domain_name, 'file': os.path.join(CONF.identity.domain_config_dir, file_name)}) mock_print.assert_has_calls([mock.call(error_msg)]) class TestDomainConfigFinder(unit.BaseTestCase): def setUp(self): super(TestDomainConfigFinder, self).setUp() self.logging = self.useFixture(fixtures.LoggerFixture()) @mock.patch('os.walk') def test_finder_ignores_files(self, mock_walk): mock_walk.return_value = [ ['.', [], ['file.txt', 'keystone.conf', 'keystone.domain0.conf']], ] domain_configs = list(cli._domain_config_finder('.')) expected_domain_configs = [('./keystone.domain0.conf', 'domain0')] self.assertThat(domain_configs, matchers.Equals(expected_domain_configs)) 
expected_msg_template = ('Ignoring file (%s) while scanning ' 'domain config directory') self.assertThat( self.logging.output, matchers.Contains(expected_msg_template % 'file.txt')) self.assertThat( self.logging.output, matchers.Contains(expected_msg_template % 'keystone.conf')) keystone-9.0.0/keystone/tests/unit/test_hacking_checks.py0000664000567000056710000001150112701407102025031 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import textwrap import mock import pep8 from keystone.tests.hacking import checks from keystone.tests import unit from keystone.tests.unit.ksfixtures import hacking as hacking_fixtures class BaseStyleCheck(unit.BaseTestCase): def setUp(self): super(BaseStyleCheck, self).setUp() self.code_ex = self.useFixture(self.get_fixture()) self.addCleanup(delattr, self, 'code_ex') def get_checker(self): """Returns the checker to be used for tests in this class.""" raise NotImplemented('subclasses must provide a real implementation') def get_fixture(self): return hacking_fixtures.HackingCode() # We are patching pep8 so that only the check under test is actually # installed. 
@mock.patch('pep8._checks', {'physical_line': {}, 'logical_line': {}, 'tree': {}}) def run_check(self, code): pep8.register_check(self.get_checker()) lines = textwrap.dedent(code).strip().splitlines(True) checker = pep8.Checker(lines=lines) checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print def assert_has_errors(self, code, expected_errors=None): actual_errors = [e[:3] for e in self.run_check(code)] self.assertEqual(expected_errors or [], actual_errors) class TestCheckForMutableDefaultArgs(BaseStyleCheck): def get_checker(self): return checks.CheckForMutableDefaultArgs def test(self): code = self.code_ex.mutable_default_args['code'] errors = self.code_ex.mutable_default_args['expected_errors'] self.assert_has_errors(code, expected_errors=errors) class TestBlockCommentsBeginWithASpace(BaseStyleCheck): def get_checker(self): return checks.block_comments_begin_with_a_space def test(self): code = self.code_ex.comments_begin_with_space['code'] errors = self.code_ex.comments_begin_with_space['expected_errors'] self.assert_has_errors(code, expected_errors=errors) class TestAssertingNoneEquality(BaseStyleCheck): def get_checker(self): return checks.CheckForAssertingNoneEquality def test(self): code = self.code_ex.asserting_none_equality['code'] errors = self.code_ex.asserting_none_equality['expected_errors'] self.assert_has_errors(code, expected_errors=errors) class BaseLoggingCheck(BaseStyleCheck): def get_checker(self): return checks.CheckForLoggingIssues def get_fixture(self): return hacking_fixtures.HackingLogging() def assert_has_errors(self, code, expected_errors=None): # pull out the parts of the error that we'll match against actual_errors = (e[:3] for e in self.run_check(code)) # adjust line numbers to make the fixture data more readable. 
import_lines = len(self.code_ex.shared_imports.split('\n')) - 1 actual_errors = [(e[0] - import_lines, e[1], e[2]) for e in actual_errors] self.assertEqual(expected_errors or [], actual_errors) class TestCheckForDebugLoggingIssues(BaseLoggingCheck): def test_for_translations(self): fixture = self.code_ex.assert_no_translations_for_debug_logging code = self.code_ex.shared_imports + fixture['code'] errors = fixture['expected_errors'] self.assert_has_errors(code, expected_errors=errors) class TestLoggingWithWarn(BaseLoggingCheck): def test(self): data = self.code_ex.assert_not_using_deprecated_warn code = self.code_ex.shared_imports + data['code'] errors = data['expected_errors'] self.assert_has_errors(code, expected_errors=errors) class TestCheckForNonDebugLoggingIssues(BaseLoggingCheck): def test_for_translations(self): for example in self.code_ex.examples: code = self.code_ex.shared_imports + example['code'] errors = example['expected_errors'] self.assert_has_errors(code, expected_errors=errors) class TestDictConstructorWithSequenceCopy(BaseStyleCheck): def get_checker(self): return checks.dict_constructor_with_sequence_copy def test(self): code = self.code_ex.dict_constructor['code'] errors = self.code_ex.dict_constructor['expected_errors'] self.assert_has_errors(code, expected_errors=errors) keystone-9.0.0/keystone/tests/unit/test_token_bind.py0000664000567000056710000001740612701407102024233 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid from keystone.common import wsgi from keystone import exception from keystone.models import token_model from keystone.tests import unit from keystone.tests.unit import test_token_provider KERBEROS_BIND = 'USER@REALM' ANY = 'any' class BindTest(unit.TestCase): """Test binding tokens to a Principal. Even though everything in this file references kerberos the same concepts will apply to all future binding mechanisms. """ def setUp(self): super(BindTest, self).setUp() self.TOKEN_BIND_KERB = copy.deepcopy( test_token_provider.SAMPLE_V3_TOKEN) self.TOKEN_BIND_KERB['token']['bind'] = {'kerberos': KERBEROS_BIND} self.TOKEN_BIND_UNKNOWN = copy.deepcopy( test_token_provider.SAMPLE_V3_TOKEN) self.TOKEN_BIND_UNKNOWN['token']['bind'] = {'FOO': 'BAR'} self.TOKEN_BIND_NONE = copy.deepcopy( test_token_provider.SAMPLE_V3_TOKEN) self.ALL_TOKENS = [self.TOKEN_BIND_KERB, self.TOKEN_BIND_UNKNOWN, self.TOKEN_BIND_NONE] def assert_kerberos_bind(self, tokens, bind_level, use_kerberos=True, success=True): if not isinstance(tokens, dict): for token in tokens: self.assert_kerberos_bind(token, bind_level, use_kerberos=use_kerberos, success=success) elif use_kerberos == ANY: for val in (True, False): self.assert_kerberos_bind(tokens, bind_level, use_kerberos=val, success=success) else: context = {'environment': {}} self.config_fixture.config(group='token', enforce_token_bind=bind_level) if use_kerberos: context['environment']['REMOTE_USER'] = KERBEROS_BIND context['environment']['AUTH_TYPE'] = 'Negotiate' # NOTE(morganfainberg): This assumes a V3 token. 
token_ref = token_model.KeystoneToken( token_id=uuid.uuid4().hex, token_data=tokens) if not success: self.assertRaises(exception.Unauthorized, wsgi.validate_token_bind, context, token_ref) else: wsgi.validate_token_bind(context, token_ref) # DISABLED def test_bind_disabled_with_kerb_user(self): self.assert_kerberos_bind(self.ALL_TOKENS, bind_level='disabled', use_kerberos=ANY, success=True) # PERMISSIVE def test_bind_permissive_with_kerb_user(self): self.assert_kerberos_bind(self.TOKEN_BIND_KERB, bind_level='permissive', use_kerberos=True, success=True) def test_bind_permissive_with_regular_token(self): self.assert_kerberos_bind(self.TOKEN_BIND_NONE, bind_level='permissive', use_kerberos=ANY, success=True) def test_bind_permissive_without_kerb_user(self): self.assert_kerberos_bind(self.TOKEN_BIND_KERB, bind_level='permissive', use_kerberos=False, success=False) def test_bind_permissive_with_unknown_bind(self): self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN, bind_level='permissive', use_kerberos=ANY, success=True) # STRICT def test_bind_strict_with_regular_token(self): self.assert_kerberos_bind(self.TOKEN_BIND_NONE, bind_level='strict', use_kerberos=ANY, success=True) def test_bind_strict_with_kerb_user(self): self.assert_kerberos_bind(self.TOKEN_BIND_KERB, bind_level='strict', use_kerberos=True, success=True) def test_bind_strict_without_kerb_user(self): self.assert_kerberos_bind(self.TOKEN_BIND_KERB, bind_level='strict', use_kerberos=False, success=False) def test_bind_strict_with_unknown_bind(self): self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN, bind_level='strict', use_kerberos=ANY, success=False) # REQUIRED def test_bind_required_with_regular_token(self): self.assert_kerberos_bind(self.TOKEN_BIND_NONE, bind_level='required', use_kerberos=ANY, success=False) def test_bind_required_with_kerb_user(self): self.assert_kerberos_bind(self.TOKEN_BIND_KERB, bind_level='required', use_kerberos=True, success=True) def test_bind_required_without_kerb_user(self): 
self.assert_kerberos_bind(self.TOKEN_BIND_KERB, bind_level='required', use_kerberos=False, success=False) def test_bind_required_with_unknown_bind(self): self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN, bind_level='required', use_kerberos=ANY, success=False) # NAMED def test_bind_named_with_regular_token(self): self.assert_kerberos_bind(self.TOKEN_BIND_NONE, bind_level='kerberos', use_kerberos=ANY, success=False) def test_bind_named_with_kerb_user(self): self.assert_kerberos_bind(self.TOKEN_BIND_KERB, bind_level='kerberos', use_kerberos=True, success=True) def test_bind_named_without_kerb_user(self): self.assert_kerberos_bind(self.TOKEN_BIND_KERB, bind_level='kerberos', use_kerberos=False, success=False) def test_bind_named_with_unknown_bind(self): self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN, bind_level='kerberos', use_kerberos=ANY, success=False) def test_bind_named_with_unknown_scheme(self): self.assert_kerberos_bind(self.ALL_TOKENS, bind_level='unknown', use_kerberos=ANY, success=False) keystone-9.0.0/keystone/tests/unit/rest.py0000664000567000056710000002265412701407102022036 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils from six.moves import http_client import webtest from keystone.auth import controllers as auth_controllers from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database class RestfulTestCase(unit.TestCase): """Performs restful tests against the WSGI app over HTTP. This class launches public & admin WSGI servers for every test, which can be accessed by calling ``public_request()`` or ``admin_request()``, respectfully. ``restful_request()`` and ``request()`` methods are also exposed if you need to bypass restful conventions or access HTTP details in your test implementation. Three new asserts are provided: * ``assertResponseSuccessful``: called automatically for every request unless an ``expected_status`` is provided * ``assertResponseStatus``: called instead of ``assertResponseSuccessful``, if an ``expected_status`` is provided * ``assertValidResponseHeaders``: validates that the response headers appear as expected Requests are automatically serialized according to the defined ``content_type``. Responses are automatically deserialized as well, and available in the ``response.body`` attribute. The original body content is available in the ``response.raw`` attribute. 
""" # default content type to test content_type = 'json' def get_extensions(self): return None def setUp(self, app_conf='keystone'): super(RestfulTestCase, self).setUp() # Will need to reset the plug-ins self.addCleanup(setattr, auth_controllers, 'AUTH_METHODS', {}) self.useFixture(database.Database(self.sql_driver_version_overrides)) self.load_backends() self.load_fixtures(default_fixtures) self.public_app = webtest.TestApp( self.loadapp(app_conf, name='main')) self.addCleanup(delattr, self, 'public_app') self.admin_app = webtest.TestApp( self.loadapp(app_conf, name='admin')) self.addCleanup(delattr, self, 'admin_app') def request(self, app, path, body=None, headers=None, token=None, expected_status=None, **kwargs): if headers: headers = {str(k): str(v) for k, v in headers.items()} else: headers = {} if token: headers['X-Auth-Token'] = str(token) # sets environ['REMOTE_ADDR'] kwargs.setdefault('remote_addr', 'localhost') response = app.request(path, headers=headers, status=expected_status, body=body, **kwargs) return response def assertResponseSuccessful(self, response): """Asserts that a status code lies inside the 2xx range. :param response: :py:class:`httplib.HTTPResponse` to be verified to have a status code between 200 and 299. example:: self.assertResponseSuccessful(response) """ self.assertTrue( response.status_code >= 200 and response.status_code <= 299, 'Status code %d is outside of the expected range (2xx)\n\n%s' % (response.status, response.body)) def assertResponseStatus(self, response, expected_status): """Asserts a specific status code on the response. 
:param response: :py:class:`httplib.HTTPResponse` :param expected_status: The specific ``status`` result expected example:: self.assertResponseStatus(response, http_client.NO_CONTENT) """ self.assertEqual( expected_status, response.status_code, 'Status code %s is not %s, as expected\n\n%s' % (response.status_code, expected_status, response.body)) def assertValidResponseHeaders(self, response): """Ensures that response headers appear as expected.""" self.assertIn('X-Auth-Token', response.headers.get('Vary')) def assertValidErrorResponse(self, response, expected_status=http_client.BAD_REQUEST): """Verify that the error response is valid. Subclasses can override this function based on the expected response. """ self.assertEqual(expected_status, response.status_code) error = response.result['error'] self.assertEqual(response.status_code, error['code']) self.assertIsNotNone(error.get('title')) def _to_content_type(self, body, headers, content_type=None): """Attempt to encode JSON and XML automatically.""" content_type = content_type or self.content_type if content_type == 'json': headers['Accept'] = 'application/json' if body: headers['Content-Type'] = 'application/json' # NOTE(davechen):dump the body to bytes since WSGI requires # the body of the response to be `Bytestrings`. 
# see pep-3333: # https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types return jsonutils.dump_as_bytes(body) def _from_content_type(self, response, content_type=None): """Attempt to decode JSON and XML automatically, if detected.""" content_type = content_type or self.content_type if response.body is not None and response.body.strip(): # if a body is provided, a Content-Type is also expected header = response.headers.get('Content-Type') self.assertIn(content_type, header) if content_type == 'json': response.result = jsonutils.loads(response.body) else: response.result = response.body def restful_request(self, method='GET', headers=None, body=None, content_type=None, response_content_type=None, **kwargs): """Serializes/deserializes json as request/response body. .. WARNING:: * Existing Accept header will be overwritten. * Existing Content-Type header will be overwritten. """ # Initialize headers dictionary headers = {} if not headers else headers body = self._to_content_type(body, headers, content_type) # Perform the HTTP request/response response = self.request(method=method, headers=headers, body=body, **kwargs) response_content_type = response_content_type or content_type self._from_content_type(response, content_type=response_content_type) # we can save some code & improve coverage by always doing this if (method != 'HEAD' and response.status_code >= http_client.BAD_REQUEST): self.assertValidErrorResponse(response) # Contains the decoded response.body return response def _request(self, convert=True, **kwargs): if convert: response = self.restful_request(**kwargs) else: response = self.request(**kwargs) self.assertValidResponseHeaders(response) return response def public_request(self, **kwargs): return self._request(app=self.public_app, **kwargs) def admin_request(self, **kwargs): return self._request(app=self.admin_app, **kwargs) def _get_token(self, body): """Convenience method so that we can test authenticated requests.""" r = 
self.public_request(method='POST', path='/v2.0/tokens', body=body) return self._get_token_id(r) def get_admin_token(self): return self._get_token({ 'auth': { 'passwordCredentials': { 'username': self.user_reqadmin['name'], 'password': self.user_reqadmin['password'] }, 'tenantId': 'service' } }) def get_unscoped_token(self): """Convenience method so that we can test authenticated requests.""" return self._get_token({ 'auth': { 'passwordCredentials': { 'username': self.user_foo['name'], 'password': self.user_foo['password'], }, }, }) def get_scoped_token(self, tenant_id=None): """Convenience method so that we can test authenticated requests.""" if not tenant_id: tenant_id = self.tenant_bar['id'] return self._get_token({ 'auth': { 'passwordCredentials': { 'username': self.user_foo['name'], 'password': self.user_foo['password'], }, 'tenantId': tenant_id, }, }) def _get_token_id(self, r): """Helper method to return a token ID from a response. This needs to be overridden by child classes for on their content type. """ raise NotImplementedError() keystone-9.0.0/keystone/tests/unit/test_v3_identity.py0000664000567000056710000010200512701407105024351 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging import uuid import fixtures import mock from oslo_config import cfg from six.moves import http_client from testtools import matchers from keystone.common import controller from keystone import exception from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = cfg.CONF # NOTE(morganfainberg): To be removed when admin_token_auth middleware is # removed. This was moved to it's own testcase so it can setup the # admin_token_auth pipeline without impacting other tests. class IdentityTestCaseStaticAdminToken(test_v3.RestfulTestCase): EXTENSION_TO_ADD = 'admin_token_auth' def config_overrides(self): super(IdentityTestCaseStaticAdminToken, self).config_overrides() self.config_fixture.config( admin_token='ADMIN') def test_list_users_with_static_admin_token_and_multiple_backends(self): # domain-specific operations with the bootstrap ADMIN token is # disallowed when domain-specific drivers are enabled self.config_fixture.config(group='identity', domain_specific_drivers_enabled=True) self.get('/users', token=CONF.admin_token, expected_status=exception.Unauthorized.code) def test_create_user_with_admin_token_and_no_domain(self): """Call ``POST /users`` with admin token but no domain id. It should not be possible to use the admin token to create a user while not explicitly passing the domain in the request body. """ # Passing a valid domain id to new_user_ref() since domain_id is # not an optional parameter. ref = unit.new_user_ref(domain_id=self.domain_id) # Delete the domain id before sending the request. 
del ref['domain_id'] self.post('/users', body={'user': ref}, token=CONF.admin_token, expected_status=http_client.BAD_REQUEST) class IdentityTestCase(test_v3.RestfulTestCase): """Test users and groups.""" def setUp(self): super(IdentityTestCase, self).setUp() self.group = unit.new_group_ref(domain_id=self.domain_id) self.group = self.identity_api.create_group(self.group) self.group_id = self.group['id'] self.credential = unit.new_credential_ref( user_id=self.user['id'], project_id=self.project_id) self.credential_api.create_credential(self.credential['id'], self.credential) # user crud tests def test_create_user(self): """Call ``POST /users``.""" ref = unit.new_user_ref(domain_id=self.domain_id) r = self.post( '/users', body={'user': ref}) return self.assertValidUserResponse(r, ref) def test_create_user_without_domain(self): """Call ``POST /users`` without specifying domain. According to the identity-api specification, if you do not explicitly specific the domain_id in the entity, it should take the domain scope of the token as the domain_id. 
""" # Create a user with a role on the domain so we can get a # domain scoped token domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user = unit.create_user(self.identity_api, domain_id=domain['id']) self.assignment_api.create_grant( role_id=self.role_id, user_id=user['id'], domain_id=domain['id']) ref = unit.new_user_ref(domain_id=domain['id']) ref_nd = ref.copy() ref_nd.pop('domain_id') auth = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=domain['id']) r = self.post('/users', body={'user': ref_nd}, auth=auth) self.assertValidUserResponse(r, ref) # Now try the same thing without a domain token - which should fail ref = unit.new_user_ref(domain_id=domain['id']) ref_nd = ref.copy() ref_nd.pop('domain_id') auth = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) # TODO(henry-nash): Due to bug #1283539 we currently automatically # use the default domain_id if a domain scoped token is not being # used. For now we just check that a deprecation warning has been # issued. Change the code below to expect a failure once this bug is # fixed. with mock.patch( 'oslo_log.versionutils.report_deprecated_feature') as mock_dep: r = self.post('/users', body={'user': ref_nd}, auth=auth) self.assertTrue(mock_dep.called) ref['domain_id'] = CONF.identity.default_domain_id return self.assertValidUserResponse(r, ref) def test_create_user_with_admin_token_and_domain(self): """Call ``POST /users`` with admin token and domain id.""" ref = unit.new_user_ref(domain_id=self.domain_id) self.post('/users', body={'user': ref}, token=self.get_admin_token(), expected_status=http_client.CREATED) def test_user_management_normalized_keys(self): """Illustrate the inconsistent handling of hyphens in keys. To quote Morgan in bug 1526244: the reason this is converted from "domain-id" to "domain_id" is because of how we process/normalize data. 
The way we have to handle specific data types for known columns requires avoiding "-" in the actual python code since "-" is not valid for attributes in python w/o significant use of "getattr" etc. In short, historically we handle some things in conversions. The use of "extras" has long been a poor design choice that leads to odd/strange inconsistent behaviors because of other choices made in handling data from within the body. (In many cases we convert from "-" to "_" throughout openstack) Source: https://bugs.launchpad.net/keystone/+bug/1526244/comments/9 """ # Create two domains to work with. domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) # We can successfully create a normal user without any surprises. user = unit.new_user_ref(domain_id=domain1['id']) r = self.post( '/users', body={'user': user}) self.assertValidUserResponse(r, user) user['id'] = r.json['user']['id'] # Query strings are not normalized: so we get all users back (like # self.user), not just the ones in the specified domain. r = self.get( '/users?domain-id=%s' % domain1['id']) self.assertValidUserListResponse(r, ref=self.user) self.assertNotEqual(domain1['id'], self.user['domain_id']) # When creating a new user, if we move the 'domain_id' into the # 'domain-id' attribute, the server will normalize the request # attribute, and effectively "move it back" for us. 
user = unit.new_user_ref(domain_id=domain1['id']) user['domain-id'] = user.pop('domain_id') r = self.post( '/users', body={'user': user}) self.assertNotIn('domain-id', r.json['user']) self.assertEqual(domain1['id'], r.json['user']['domain_id']) # (move this attribute back so we can use assertValidUserResponse) user['domain_id'] = user.pop('domain-id') self.assertValidUserResponse(r, user) user['id'] = r.json['user']['id'] # If we try updating the user's 'domain_id' by specifying a # 'domain-id', then it'll be stored into extras rather than normalized, # and the user's actual 'domain_id' is not affected. r = self.patch( '/users/%s' % user['id'], body={'user': {'domain-id': domain2['id']}}) self.assertEqual(domain2['id'], r.json['user']['domain-id']) self.assertEqual(user['domain_id'], r.json['user']['domain_id']) self.assertNotEqual(domain2['id'], user['domain_id']) self.assertValidUserResponse(r, user) def test_create_user_bad_request(self): """Call ``POST /users``.""" self.post('/users', body={'user': {}}, expected_status=http_client.BAD_REQUEST) def test_list_users(self): """Call ``GET /users``.""" resource_url = '/users' r = self.get(resource_url) self.assertValidUserListResponse(r, ref=self.user, resource_url=resource_url) def test_list_users_with_multiple_backends(self): """Call ``GET /users`` when multiple backends is enabled. In this scenario, the controller requires a domain to be specified either as a filter or by using a domain scoped token. 
""" self.config_fixture.config(group='identity', domain_specific_drivers_enabled=True) # Create a new domain with a new project and user domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) project = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project['id'], project) user = unit.create_user(self.identity_api, domain_id=domain['id']) # Create both project and domain role grants for the user so we # can get both project and domain scoped tokens self.assignment_api.create_grant( role_id=self.role_id, user_id=user['id'], domain_id=domain['id']) self.assignment_api.create_grant( role_id=self.role_id, user_id=user['id'], project_id=project['id']) dom_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=domain['id']) project_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=project['id']) # First try using a domain scoped token resource_url = '/users' r = self.get(resource_url, auth=dom_auth) self.assertValidUserListResponse(r, ref=user, resource_url=resource_url) # Now try using a project scoped token resource_url = '/users' r = self.get(resource_url, auth=project_auth) self.assertValidUserListResponse(r, ref=user, resource_url=resource_url) # Now try with an explicit filter resource_url = ('/users?domain_id=%(domain_id)s' % {'domain_id': domain['id']}) r = self.get(resource_url) self.assertValidUserListResponse(r, ref=user, resource_url=resource_url) def test_list_users_no_default_project(self): """Call ``GET /users`` making sure no default_project_id.""" user = unit.new_user_ref(self.domain_id) user = self.identity_api.create_user(user) resource_url = '/users' r = self.get(resource_url) self.assertValidUserListResponse(r, ref=user, resource_url=resource_url) def test_get_user(self): """Call ``GET /users/{user_id}``.""" r = self.get('/users/%(user_id)s' % { 'user_id': self.user['id']}) 
self.assertValidUserResponse(r, self.user) def test_get_user_with_default_project(self): """Call ``GET /users/{user_id}`` making sure of default_project_id.""" user = unit.new_user_ref(domain_id=self.domain_id, project_id=self.project_id) user = self.identity_api.create_user(user) r = self.get('/users/%(user_id)s' % {'user_id': user['id']}) self.assertValidUserResponse(r, user) def test_add_user_to_group(self): """Call ``PUT /groups/{group_id}/users/{user_id}``.""" self.put('/groups/%(group_id)s/users/%(user_id)s' % { 'group_id': self.group_id, 'user_id': self.user['id']}) def test_list_groups_for_user(self): """Call ``GET /users/{user_id}/groups``.""" user1 = unit.create_user(self.identity_api, domain_id=self.domain['id']) user2 = unit.create_user(self.identity_api, domain_id=self.domain['id']) self.put('/groups/%(group_id)s/users/%(user_id)s' % { 'group_id': self.group_id, 'user_id': user1['id']}) # Scenarios below are written to test the default policy configuration # One should be allowed to list one's own groups auth = self.build_authentication_request( user_id=user1['id'], password=user1['password']) resource_url = ('/users/%(user_id)s/groups' % {'user_id': user1['id']}) r = self.get(resource_url, auth=auth) self.assertValidGroupListResponse(r, ref=self.group, resource_url=resource_url) # Administrator is allowed to list others' groups resource_url = ('/users/%(user_id)s/groups' % {'user_id': user1['id']}) r = self.get(resource_url) self.assertValidGroupListResponse(r, ref=self.group, resource_url=resource_url) # Ordinary users should not be allowed to list other's groups auth = self.build_authentication_request( user_id=user2['id'], password=user2['password']) r = self.get('/users/%(user_id)s/groups' % { 'user_id': user1['id']}, auth=auth, expected_status=exception.ForbiddenAction.code) def test_check_user_in_group(self): """Call ``HEAD /groups/{group_id}/users/{user_id}``.""" self.put('/groups/%(group_id)s/users/%(user_id)s' % { 'group_id': self.group_id, 
'user_id': self.user['id']}) self.head('/groups/%(group_id)s/users/%(user_id)s' % { 'group_id': self.group_id, 'user_id': self.user['id']}) def test_list_users_in_group(self): """Call ``GET /groups/{group_id}/users``.""" self.put('/groups/%(group_id)s/users/%(user_id)s' % { 'group_id': self.group_id, 'user_id': self.user['id']}) resource_url = ('/groups/%(group_id)s/users' % {'group_id': self.group_id}) r = self.get(resource_url) self.assertValidUserListResponse(r, ref=self.user, resource_url=resource_url) self.assertIn('/groups/%(group_id)s/users' % { 'group_id': self.group_id}, r.result['links']['self']) def test_remove_user_from_group(self): """Call ``DELETE /groups/{group_id}/users/{user_id}``.""" self.put('/groups/%(group_id)s/users/%(user_id)s' % { 'group_id': self.group_id, 'user_id': self.user['id']}) self.delete('/groups/%(group_id)s/users/%(user_id)s' % { 'group_id': self.group_id, 'user_id': self.user['id']}) def test_update_user(self): """Call ``PATCH /users/{user_id}``.""" user = unit.new_user_ref(domain_id=self.domain_id) del user['id'] r = self.patch('/users/%(user_id)s' % { 'user_id': self.user['id']}, body={'user': user}) self.assertValidUserResponse(r, user) def test_admin_password_reset(self): # bootstrap a user as admin user_ref = unit.create_user(self.identity_api, domain_id=self.domain['id']) # auth as user should work before a password change old_password_auth = self.build_authentication_request( user_id=user_ref['id'], password=user_ref['password']) r = self.v3_create_token(old_password_auth) old_token = r.headers.get('X-Subject-Token') # auth as user with a token should work before a password change old_token_auth = self.build_authentication_request(token=old_token) self.v3_create_token(old_token_auth) # administrative password reset new_password = uuid.uuid4().hex self.patch('/users/%s' % user_ref['id'], body={'user': {'password': new_password}}) # auth as user with original password should not work after change 
self.v3_create_token(old_password_auth, expected_status=http_client.UNAUTHORIZED) # auth as user with an old token should not work after change self.v3_create_token(old_token_auth, expected_status=http_client.NOT_FOUND) # new password should work new_password_auth = self.build_authentication_request( user_id=user_ref['id'], password=new_password) self.v3_create_token(new_password_auth) def test_update_user_domain_id(self): """Call ``PATCH /users/{user_id}`` with domain_id.""" user = unit.new_user_ref(domain_id=self.domain['id']) user = self.identity_api.create_user(user) user['domain_id'] = CONF.identity.default_domain_id r = self.patch('/users/%(user_id)s' % { 'user_id': user['id']}, body={'user': user}, expected_status=exception.ValidationError.code) self.config_fixture.config(domain_id_immutable=False) user['domain_id'] = self.domain['id'] r = self.patch('/users/%(user_id)s' % { 'user_id': user['id']}, body={'user': user}) self.assertValidUserResponse(r, user) def test_delete_user(self): """Call ``DELETE /users/{user_id}``. As well as making sure the delete succeeds, we ensure that any credentials that reference this user are also deleted, while other credentials are unaffected. In addition, no tokens should remain valid for this user. 
""" # First check the credential for this user is present r = self.credential_api.get_credential(self.credential['id']) self.assertDictEqual(self.credential, r) # Create a second credential with a different user user2 = unit.new_user_ref(domain_id=self.domain['id'], project_id=self.project['id']) user2 = self.identity_api.create_user(user2) credential2 = unit.new_credential_ref(user_id=user2['id'], project_id=self.project['id']) self.credential_api.create_credential(credential2['id'], credential2) # Create a token for this user which we can check later # gets deleted auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) token = self.get_requested_token(auth_data) # Confirm token is valid for now self.head('/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http_client.OK) # Now delete the user self.delete('/users/%(user_id)s' % { 'user_id': self.user['id']}) # Deleting the user should have deleted any credentials # that reference this project self.assertRaises(exception.CredentialNotFound, self.credential_api.get_credential, self.credential['id']) # And the no tokens we remain valid tokens = self.token_provider_api._persistence._list_tokens( self.user['id']) self.assertEqual(0, len(tokens)) # But the credential for user2 is unaffected r = self.credential_api.get_credential(credential2['id']) self.assertDictEqual(credential2, r) # shadow user tests def test_shadow_federated_user(self): fed_user = unit.new_federated_user_ref() user = ( self.identity_api.shadow_federated_user(fed_user["idp_id"], fed_user["protocol_id"], fed_user["unique_id"], fed_user["display_name"]) ) self.assertIsNotNone(user["id"]) self.assertEqual(len(user.keys()), 4) self.assertIsNotNone(user['id']) self.assertIsNotNone(user['name']) self.assertIsNone(user['domain_id']) self.assertEqual(user['enabled'], True) def test_shadow_existing_federated_user(self): fed_user = unit.new_federated_user_ref() # 
introduce the user to keystone for the first time shadow_user1 = self.identity_api.shadow_federated_user( fed_user["idp_id"], fed_user["protocol_id"], fed_user["unique_id"], fed_user["display_name"]) self.assertEqual(fed_user['display_name'], shadow_user1['name']) # shadow the user again, with another name to invalidate the cache # internally, this operation causes request to the driver. It should # not fail. fed_user['display_name'] = uuid.uuid4().hex shadow_user2 = self.identity_api.shadow_federated_user( fed_user["idp_id"], fed_user["protocol_id"], fed_user["unique_id"], fed_user["display_name"]) # FIXME(dolph): These assertEqual / assertNotEqual should be reversed, # to illustrate that the display name has been updated as expected. self.assertNotEqual(fed_user['display_name'], shadow_user2['name']) self.assertEqual(shadow_user1['name'], shadow_user2['name']) # The shadowed users still share the same unique ID. self.assertEqual(shadow_user1['id'], shadow_user2['id']) # group crud tests def test_create_group(self): """Call ``POST /groups``.""" # Create a new group to avoid a duplicate check failure ref = unit.new_group_ref(domain_id=self.domain_id) r = self.post( '/groups', body={'group': ref}) return self.assertValidGroupResponse(r, ref) def test_create_group_bad_request(self): """Call ``POST /groups``.""" self.post('/groups', body={'group': {}}, expected_status=http_client.BAD_REQUEST) def test_list_groups(self): """Call ``GET /groups``.""" resource_url = '/groups' r = self.get(resource_url) self.assertValidGroupListResponse(r, ref=self.group, resource_url=resource_url) def test_get_group(self): """Call ``GET /groups/{group_id}``.""" r = self.get('/groups/%(group_id)s' % { 'group_id': self.group_id}) self.assertValidGroupResponse(r, self.group) def test_update_group(self): """Call ``PATCH /groups/{group_id}``.""" group = unit.new_group_ref(domain_id=self.domain_id) del group['id'] r = self.patch('/groups/%(group_id)s' % { 'group_id': self.group_id}, 
body={'group': group}) self.assertValidGroupResponse(r, group) def test_update_group_domain_id(self): """Call ``PATCH /groups/{group_id}`` with domain_id.""" self.group['domain_id'] = CONF.identity.default_domain_id r = self.patch('/groups/%(group_id)s' % { 'group_id': self.group['id']}, body={'group': self.group}, expected_status=exception.ValidationError.code) self.config_fixture.config(domain_id_immutable=False) self.group['domain_id'] = self.domain['id'] r = self.patch('/groups/%(group_id)s' % { 'group_id': self.group['id']}, body={'group': self.group}) self.assertValidGroupResponse(r, self.group) def test_delete_group(self): """Call ``DELETE /groups/{group_id}``.""" self.delete('/groups/%(group_id)s' % { 'group_id': self.group_id}) def test_create_user_password_not_logged(self): # When a user is created, the password isn't logged at any level. log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) ref = unit.new_user_ref(domain_id=self.domain_id) self.post( '/users', body={'user': ref}) self.assertNotIn(ref['password'], log_fix.output) def test_update_password_not_logged(self): # When admin modifies user password, the password isn't logged at any # level. log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) # bootstrap a user as admin user_ref = unit.create_user(self.identity_api, domain_id=self.domain['id']) self.assertNotIn(user_ref['password'], log_fix.output) # administrative password reset new_password = uuid.uuid4().hex self.patch('/users/%s' % user_ref['id'], body={'user': {'password': new_password}}) self.assertNotIn(new_password, log_fix.output) class IdentityV3toV2MethodsTestCase(unit.TestCase): """Test users V3 to V2 conversion methods.""" def new_user_ref(self, **kwargs): """Construct a bare bones user ref. Omits all optional components. 
""" ref = unit.new_user_ref(**kwargs) # description is already omitted del ref['email'] del ref['enabled'] del ref['password'] return ref def setUp(self): super(IdentityV3toV2MethodsTestCase, self).setUp() self.load_backends() user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex # User with only default_project_id in ref self.user1 = self.new_user_ref( id=user_id, name=user_id, project_id=project_id, domain_id=CONF.identity.default_domain_id) # User without default_project_id or tenantId in ref self.user2 = self.new_user_ref( id=user_id, name=user_id, domain_id=CONF.identity.default_domain_id) # User with both tenantId and default_project_id in ref self.user3 = self.new_user_ref( id=user_id, name=user_id, project_id=project_id, tenantId=project_id, domain_id=CONF.identity.default_domain_id) # User with only tenantId in ref self.user4 = self.new_user_ref( id=user_id, name=user_id, tenantId=project_id, domain_id=CONF.identity.default_domain_id) # Expected result if the user is meant to have a tenantId element self.expected_user = {'id': user_id, 'name': user_id, 'username': user_id, 'tenantId': project_id} # Expected result if the user is not meant to have a tenantId element self.expected_user_no_tenant_id = {'id': user_id, 'name': user_id, 'username': user_id} def test_v3_to_v2_user_method(self): updated_user1 = controller.V2Controller.v3_to_v2_user(self.user1) self.assertIs(self.user1, updated_user1) self.assertDictEqual(self.expected_user, self.user1) updated_user2 = controller.V2Controller.v3_to_v2_user(self.user2) self.assertIs(self.user2, updated_user2) self.assertDictEqual(self.expected_user_no_tenant_id, self.user2) updated_user3 = controller.V2Controller.v3_to_v2_user(self.user3) self.assertIs(self.user3, updated_user3) self.assertDictEqual(self.expected_user, self.user3) updated_user4 = controller.V2Controller.v3_to_v2_user(self.user4) self.assertIs(self.user4, updated_user4) self.assertDictEqual(self.expected_user_no_tenant_id, self.user4) def 
test_v3_to_v2_user_method_list(self): user_list = [self.user1, self.user2, self.user3, self.user4] updated_list = controller.V2Controller.v3_to_v2_user(user_list) self.assertEqual(len(user_list), len(updated_list)) for i, ref in enumerate(updated_list): # Order should not change. self.assertIs(ref, user_list[i]) self.assertDictEqual(self.expected_user, self.user1) self.assertDictEqual(self.expected_user_no_tenant_id, self.user2) self.assertDictEqual(self.expected_user, self.user3) self.assertDictEqual(self.expected_user_no_tenant_id, self.user4) class UserSelfServiceChangingPasswordsTestCase(test_v3.RestfulTestCase): def setUp(self): super(UserSelfServiceChangingPasswordsTestCase, self).setUp() self.user_ref = unit.create_user(self.identity_api, domain_id=self.domain['id']) self.token = self.get_request_token(self.user_ref['password'], http_client.CREATED) def get_request_token(self, password, expected_status): auth_data = self.build_authentication_request( user_id=self.user_ref['id'], password=password) r = self.v3_create_token(auth_data, expected_status=expected_status) return r.headers.get('X-Subject-Token') def change_password(self, expected_status, **kwargs): """Returns a test response for a change password request.""" return self.post('/users/%s/password' % self.user_ref['id'], body={'user': kwargs}, token=self.token, expected_status=expected_status) def test_changing_password(self): # original password works token_id = self.get_request_token(self.user_ref['password'], expected_status=http_client.CREATED) # original token works old_token_auth = self.build_authentication_request(token=token_id) self.v3_create_token(old_token_auth) # change password new_password = uuid.uuid4().hex self.change_password(password=new_password, original_password=self.user_ref['password'], expected_status=http_client.NO_CONTENT) # old password fails self.get_request_token(self.user_ref['password'], expected_status=http_client.UNAUTHORIZED) # old token fails 
self.v3_create_token(old_token_auth, expected_status=http_client.NOT_FOUND) # new password works self.get_request_token(new_password, expected_status=http_client.CREATED) def test_changing_password_with_missing_original_password_fails(self): r = self.change_password(password=uuid.uuid4().hex, expected_status=http_client.BAD_REQUEST) self.assertThat(r.result['error']['message'], matchers.Contains('original_password')) def test_changing_password_with_missing_password_fails(self): r = self.change_password(original_password=self.user_ref['password'], expected_status=http_client.BAD_REQUEST) self.assertThat(r.result['error']['message'], matchers.Contains('password')) def test_changing_password_with_incorrect_password_fails(self): self.change_password(password=uuid.uuid4().hex, original_password=uuid.uuid4().hex, expected_status=http_client.UNAUTHORIZED) def test_changing_password_with_disabled_user_fails(self): # disable the user account self.user_ref['enabled'] = False self.patch('/users/%s' % self.user_ref['id'], body={'user': self.user_ref}) self.change_password(password=uuid.uuid4().hex, original_password=self.user_ref['password'], expected_status=http_client.UNAUTHORIZED) def test_changing_password_not_logged(self): # When a user changes their password, the password isn't logged at any # level. 
log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) # change password new_password = uuid.uuid4().hex self.change_password(password=new_password, original_password=self.user_ref['password'], expected_status=http_client.NO_CONTENT) self.assertNotIn(self.user_ref['password'], log_fix.output) self.assertNotIn(new_password, log_fix.output) keystone-9.0.0/keystone/tests/unit/schema/0000775000567000056710000000000012701407246021747 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/schema/__init__.py0000664000567000056710000000000012701407102024035 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/schema/v2.py0000664000567000056710000001113112701407102022634 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from keystone.common import validation from keystone.common.validation import parameter_types from keystone.common.validation import validators _project_properties = { 'id': parameter_types.id_string, 'name': parameter_types.name, 'enabled': parameter_types.boolean, 'description': validation.nullable(parameter_types.description), } _token_properties = { 'audit_ids': { 'type': 'array', 'items': { 'type': 'string', }, 'minItems': 1, 'maxItems': 2, }, 'id': {'type': 'string'}, 'expires': {'type': 'string'}, 'issued_at': {'type': 'string'}, 'tenant': { 'type': 'object', 'properties': _project_properties, 'required': ['id', 'name', 'enabled'], 'additionalProperties': False, }, } _role_properties = { 'name': parameter_types.name, } _user_properties = { 'id': parameter_types.id_string, 'name': parameter_types.name, 'username': parameter_types.name, 'roles': { 'type': 'array', 'items': { 'type': 'object', 'properties': _role_properties, 'required': ['name'], 'additionalProperties': False, }, }, 'roles_links': { 'type': 'array', 'maxItems': 0, }, } _metadata_properties = { 'is_admin': {'type': 'integer'}, 'roles': { 'type': 'array', 'items': {'type': 'string'}, }, } _endpoint_properties = { 'id': {'type': 'string'}, 'adminURL': parameter_types.url, 'internalURL': parameter_types.url, 'publicURL': parameter_types.url, 'region': {'type': 'string'}, } _service_properties = { 'type': {'type': 'string'}, 'name': parameter_types.name, 'endpoints_links': { 'type': 'array', 'maxItems': 0, }, 'endpoints': { 'type': 'array', 'minItems': 1, 'items': { 'type': 'object', 'properties': _endpoint_properties, 'required': ['id', 'publicURL'], 'additionalProperties': False, }, }, } _base_access_properties = { 'metadata': { 'type': 'object', 'properties': _metadata_properties, 'required': ['is_admin', 'roles'], 'additionalProperties': False, }, 'serviceCatalog': { 'type': 'array', 'items': { 'type': 'object', 'properties': _service_properties, 'required': ['name', 'type', 
'endpoints_links', 'endpoints'], 'additionalProperties': False, }, }, 'token': { 'type': 'object', 'properties': _token_properties, 'required': ['audit_ids', 'id', 'expires', 'issued_at'], 'additionalProperties': False, }, 'user': { 'type': 'object', 'properties': _user_properties, 'required': ['id', 'name', 'username', 'roles', 'roles_links'], 'additionalProperties': False, }, } _unscoped_access_properties = copy.deepcopy(_base_access_properties) unscoped_metadata = _unscoped_access_properties['metadata'] unscoped_metadata['properties']['roles']['maxItems'] = 0 _unscoped_access_properties['user']['properties']['roles']['maxItems'] = 0 _unscoped_access_properties['serviceCatalog']['maxItems'] = 0 _scoped_access_properties = copy.deepcopy(_base_access_properties) _scoped_access_properties['metadata']['properties']['roles']['minItems'] = 1 _scoped_access_properties['serviceCatalog']['minItems'] = 1 _scoped_access_properties['user']['properties']['roles']['minItems'] = 1 base_token_schema = { 'type': 'object', 'required': ['metadata', 'user', 'serviceCatalog', 'token'], 'additionalProperties': False, } unscoped_token_schema = copy.deepcopy(base_token_schema) unscoped_token_schema['properties'] = _unscoped_access_properties scoped_token_schema = copy.deepcopy(base_token_schema) scoped_token_schema['properties'] = _scoped_access_properties # Validator objects unscoped_validator = validators.SchemaValidator(unscoped_token_schema) scoped_validator = validators.SchemaValidator(scoped_token_schema) keystone-9.0.0/keystone/tests/unit/test_v3_federation.py0000664000567000056710000044352312701407105024655 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os import random from testtools import matchers import uuid import fixtures from lxml import etree import mock from oslo_config import cfg from oslo_log import versionutils from oslo_serialization import jsonutils from oslo_utils import importutils from oslotest import mockpatch import saml2 from saml2 import saml from saml2 import sigver from six.moves import http_client from six.moves import range, urllib, zip xmldsig = importutils.try_import("saml2.xmldsig") if not xmldsig: xmldsig = importutils.try_import("xmldsig") from keystone.auth import controllers as auth_controllers from keystone.common import environment from keystone.contrib.federation import routers from keystone import exception from keystone.federation import controllers as federation_controllers from keystone.federation import idp as keystone_idp from keystone import notifications from keystone.tests import unit from keystone.tests.unit import core from keystone.tests.unit import federation_fixtures from keystone.tests.unit import ksfixtures from keystone.tests.unit import mapping_fixtures from keystone.tests.unit import test_v3 from keystone.tests.unit import utils from keystone.token.providers import common as token_common subprocess = environment.subprocess CONF = cfg.CONF ROOTDIR = os.path.dirname(os.path.abspath(__file__)) XMLDIR = os.path.join(ROOTDIR, 'saml2/') def dummy_validator(*args, **kwargs): pass class FederationTests(test_v3.RestfulTestCase): @mock.patch.object(versionutils, 'report_deprecated_feature') def test_exception_happens(self, mock_deprecator): 
routers.FederationExtension(mock.ANY) mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY) args, _kwargs = mock_deprecator.call_args self.assertIn("Remove federation_extension from", args[1]) class FederatedSetupMixin(object): ACTION = 'authenticate' IDP = 'ORG_IDP' PROTOCOL = 'saml2' AUTH_METHOD = 'saml2' USER = 'user@ORGANIZATION' ASSERTION_PREFIX = 'PREFIX_' IDP_WITH_REMOTE = 'ORG_IDP_REMOTE' REMOTE_IDS = ['entityID_IDP1', 'entityID_IDP2'] REMOTE_ID_ATTR = uuid.uuid4().hex UNSCOPED_V3_SAML2_REQ = { "identity": { "methods": [AUTH_METHOD], AUTH_METHOD: { "identity_provider": IDP, "protocol": PROTOCOL } } } def _check_domains_are_valid(self, token): self.assertEqual('Federated', token['user']['domain']['id']) self.assertEqual('Federated', token['user']['domain']['name']) def _project(self, project): return (project['id'], project['name']) def _roles(self, roles): return set([(r['id'], r['name']) for r in roles]) def _check_projects_and_roles(self, token, roles, projects): """Check whether the projects and the roles match.""" token_roles = token.get('roles') if token_roles is None: raise AssertionError('Roles not found in the token') token_roles = self._roles(token_roles) roles_ref = self._roles(roles) self.assertEqual(token_roles, roles_ref) token_projects = token.get('project') if token_projects is None: raise AssertionError('Projects not found in the token') token_projects = self._project(token_projects) projects_ref = self._project(projects) self.assertEqual(token_projects, projects_ref) def _check_scoped_token_attributes(self, token): for obj in ('user', 'catalog', 'expires_at', 'issued_at', 'methods', 'roles'): self.assertIn(obj, token) os_federation = token['user']['OS-FEDERATION'] self.assertIn('groups', os_federation) self.assertIn('identity_provider', os_federation) self.assertIn('protocol', os_federation) self.assertThat(os_federation, matchers.HasLength(3)) self.assertEqual(self.IDP, os_federation['identity_provider']['id']) 
self.assertEqual(self.PROTOCOL, os_federation['protocol']['id']) def _check_project_scoped_token_attributes(self, token, project_id): self.assertEqual(project_id, token['project']['id']) self._check_scoped_token_attributes(token) def _check_domain_scoped_token_attributes(self, token, domain_id): self.assertEqual(domain_id, token['domain']['id']) self._check_scoped_token_attributes(token) def assertValidMappedUser(self, token): """Check if user object meets all the criteria.""" user = token['user'] self.assertIn('id', user) self.assertIn('name', user) self.assertIn('domain', user) self.assertIn('groups', user['OS-FEDERATION']) self.assertIn('identity_provider', user['OS-FEDERATION']) self.assertIn('protocol', user['OS-FEDERATION']) # Make sure user_id is url safe self.assertEqual(urllib.parse.quote(user['name']), user['id']) def _issue_unscoped_token(self, idp=None, assertion='EMPLOYEE_ASSERTION', environment=None): api = federation_controllers.Auth() context = {'environment': environment or {}} self._inject_assertion(context, assertion) if idp is None: idp = self.IDP r = api.federated_authentication(context, idp, self.PROTOCOL) return r def idp_ref(self, id=None): idp = { 'id': id or uuid.uuid4().hex, 'enabled': True, 'description': uuid.uuid4().hex } return idp def proto_ref(self, mapping_id=None): proto = { 'id': uuid.uuid4().hex, 'mapping_id': mapping_id or uuid.uuid4().hex } return proto def mapping_ref(self, rules=None): return { 'id': uuid.uuid4().hex, 'rules': rules or self.rules['rules'] } def _scope_request(self, unscoped_token_id, scope, scope_id): return { 'auth': { 'identity': { 'methods': [ self.AUTH_METHOD ], self.AUTH_METHOD: { 'id': unscoped_token_id } }, 'scope': { scope: { 'id': scope_id } } } } def _inject_assertion(self, context, variant, query_string=None): assertion = getattr(mapping_fixtures, variant) context['environment'].update(assertion) context['query_string'] = query_string or [] def load_federation_sample_data(self): """Inject 
additional data.""" # Create and add domains self.domainA = unit.new_domain_ref() self.resource_api.create_domain(self.domainA['id'], self.domainA) self.domainB = unit.new_domain_ref() self.resource_api.create_domain(self.domainB['id'], self.domainB) self.domainC = unit.new_domain_ref() self.resource_api.create_domain(self.domainC['id'], self.domainC) self.domainD = unit.new_domain_ref() self.resource_api.create_domain(self.domainD['id'], self.domainD) # Create and add projects self.proj_employees = unit.new_project_ref( domain_id=self.domainA['id']) self.resource_api.create_project(self.proj_employees['id'], self.proj_employees) self.proj_customers = unit.new_project_ref( domain_id=self.domainA['id']) self.resource_api.create_project(self.proj_customers['id'], self.proj_customers) self.project_all = unit.new_project_ref( domain_id=self.domainA['id']) self.resource_api.create_project(self.project_all['id'], self.project_all) self.project_inherited = unit.new_project_ref( domain_id=self.domainD['id']) self.resource_api.create_project(self.project_inherited['id'], self.project_inherited) # Create and add groups self.group_employees = unit.new_group_ref(domain_id=self.domainA['id']) self.group_employees = ( self.identity_api.create_group(self.group_employees)) self.group_customers = unit.new_group_ref(domain_id=self.domainA['id']) self.group_customers = ( self.identity_api.create_group(self.group_customers)) self.group_admins = unit.new_group_ref(domain_id=self.domainA['id']) self.group_admins = self.identity_api.create_group(self.group_admins) # Create and add roles self.role_employee = unit.new_role_ref() self.role_api.create_role(self.role_employee['id'], self.role_employee) self.role_customer = unit.new_role_ref() self.role_api.create_role(self.role_customer['id'], self.role_customer) self.role_admin = unit.new_role_ref() self.role_api.create_role(self.role_admin['id'], self.role_admin) # Employees can access # * proj_employees # * project_all 
self.assignment_api.create_grant(self.role_employee['id'], group_id=self.group_employees['id'], project_id=self.proj_employees['id']) self.assignment_api.create_grant(self.role_employee['id'], group_id=self.group_employees['id'], project_id=self.project_all['id']) # Customers can access # * proj_customers self.assignment_api.create_grant(self.role_customer['id'], group_id=self.group_customers['id'], project_id=self.proj_customers['id']) # Admins can access: # * proj_customers # * proj_employees # * project_all self.assignment_api.create_grant(self.role_admin['id'], group_id=self.group_admins['id'], project_id=self.proj_customers['id']) self.assignment_api.create_grant(self.role_admin['id'], group_id=self.group_admins['id'], project_id=self.proj_employees['id']) self.assignment_api.create_grant(self.role_admin['id'], group_id=self.group_admins['id'], project_id=self.project_all['id']) self.assignment_api.create_grant(self.role_customer['id'], group_id=self.group_customers['id'], domain_id=self.domainA['id']) # Customers can access: # * domain A self.assignment_api.create_grant(self.role_customer['id'], group_id=self.group_customers['id'], domain_id=self.domainA['id']) # Customers can access projects via inheritance: # * domain D self.assignment_api.create_grant(self.role_customer['id'], group_id=self.group_customers['id'], domain_id=self.domainD['id'], inherited_to_projects=True) # Employees can access: # * domain A # * domain B self.assignment_api.create_grant(self.role_employee['id'], group_id=self.group_employees['id'], domain_id=self.domainA['id']) self.assignment_api.create_grant(self.role_employee['id'], group_id=self.group_employees['id'], domain_id=self.domainB['id']) # Admins can access: # * domain A # * domain B # * domain C self.assignment_api.create_grant(self.role_admin['id'], group_id=self.group_admins['id'], domain_id=self.domainA['id']) self.assignment_api.create_grant(self.role_admin['id'], group_id=self.group_admins['id'], 
domain_id=self.domainB['id']) self.assignment_api.create_grant(self.role_admin['id'], group_id=self.group_admins['id'], domain_id=self.domainC['id']) self.rules = { 'rules': [ { 'local': [ { 'group': { 'id': self.group_employees['id'] } }, { 'user': { 'name': '{0}', 'id': '{1}' } } ], 'remote': [ { 'type': 'UserName' }, { 'type': 'Email', }, { 'type': 'orgPersonType', 'any_one_of': [ 'Employee' ] } ] }, { 'local': [ { 'group': { 'id': self.group_employees['id'] } }, { 'user': { 'name': '{0}', 'id': '{1}' } } ], 'remote': [ { 'type': self.ASSERTION_PREFIX + 'UserName' }, { 'type': self.ASSERTION_PREFIX + 'Email', }, { 'type': self.ASSERTION_PREFIX + 'orgPersonType', 'any_one_of': [ 'SuperEmployee' ] } ] }, { 'local': [ { 'group': { 'id': self.group_customers['id'] } }, { 'user': { 'name': '{0}', 'id': '{1}' } } ], 'remote': [ { 'type': 'UserName' }, { 'type': 'Email' }, { 'type': 'orgPersonType', 'any_one_of': [ 'Customer' ] } ] }, { 'local': [ { 'group': { 'id': self.group_admins['id'] } }, { 'group': { 'id': self.group_employees['id'] } }, { 'group': { 'id': self.group_customers['id'] } }, { 'user': { 'name': '{0}', 'id': '{1}' } } ], 'remote': [ { 'type': 'UserName' }, { 'type': 'Email' }, { 'type': 'orgPersonType', 'any_one_of': [ 'Admin', 'Chief' ] } ] }, { 'local': [ { 'group': { 'id': uuid.uuid4().hex } }, { 'group': { 'id': self.group_customers['id'] } }, { 'user': { 'name': '{0}', 'id': '{1}' } } ], 'remote': [ { 'type': 'UserName', }, { 'type': 'Email', }, { 'type': 'FirstName', 'any_one_of': [ 'Jill' ] }, { 'type': 'LastName', 'any_one_of': [ 'Smith' ] } ] }, { 'local': [ { 'group': { 'id': 'this_group_no_longer_exists' } }, { 'user': { 'name': '{0}', 'id': '{1}' } } ], 'remote': [ { 'type': 'UserName', }, { 'type': 'Email', }, { 'type': 'Email', 'any_one_of': [ 'testacct@example.com' ] }, { 'type': 'orgPersonType', 'any_one_of': [ 'Tester' ] } ] }, # rules with local group names { "local": [ { 'user': { 'name': '{0}', 'id': '{1}' } }, { "group": { 
"name": self.group_customers['name'], "domain": { "name": self.domainA['name'] } } } ], "remote": [ { 'type': 'UserName', }, { 'type': 'Email', }, { "type": "orgPersonType", "any_one_of": [ "CEO", "CTO" ], } ] }, { "local": [ { 'user': { 'name': '{0}', 'id': '{1}' } }, { "group": { "name": self.group_admins['name'], "domain": { "id": self.domainA['id'] } } } ], "remote": [ { "type": "UserName", }, { "type": "Email", }, { "type": "orgPersonType", "any_one_of": [ "Managers" ] } ] }, { "local": [ { "user": { "name": "{0}", "id": "{1}" } }, { "group": { "name": "NON_EXISTING", "domain": { "id": self.domainA['id'] } } } ], "remote": [ { "type": "UserName", }, { "type": "Email", }, { "type": "UserName", "any_one_of": [ "IamTester" ] } ] }, { "local": [ { "user": { "type": "local", "name": self.user['name'], "domain": { "id": self.user['domain_id'] } } }, { "group": { "id": self.group_customers['id'] } } ], "remote": [ { "type": "UserType", "any_one_of": [ "random" ] } ] }, { "local": [ { "user": { "type": "local", "name": self.user['name'], "domain": { "id": uuid.uuid4().hex } } } ], "remote": [ { "type": "Position", "any_one_of": [ "DirectorGeneral" ] } ] } ] } # Add IDP self.idp = self.idp_ref(id=self.IDP) self.federation_api.create_idp(self.idp['id'], self.idp) # Add IDP with remote self.idp_with_remote = self.idp_ref(id=self.IDP_WITH_REMOTE) self.idp_with_remote['remote_ids'] = self.REMOTE_IDS self.federation_api.create_idp(self.idp_with_remote['id'], self.idp_with_remote) # Add a mapping self.mapping = self.mapping_ref() self.federation_api.create_mapping(self.mapping['id'], self.mapping) # Add protocols self.proto_saml = self.proto_ref(mapping_id=self.mapping['id']) self.proto_saml['id'] = self.PROTOCOL self.federation_api.create_protocol(self.idp['id'], self.proto_saml['id'], self.proto_saml) # Add protocols IDP with remote self.federation_api.create_protocol(self.idp_with_remote['id'], self.proto_saml['id'], self.proto_saml) # Generate fake tokens context = 
{'environment': {}} self.tokens = {} VARIANTS = ('EMPLOYEE_ASSERTION', 'CUSTOMER_ASSERTION', 'ADMIN_ASSERTION') api = auth_controllers.Auth() for variant in VARIANTS: self._inject_assertion(context, variant) r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ) self.tokens[variant] = r.headers.get('X-Subject-Token') self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN = self._scope_request( uuid.uuid4().hex, 'project', self.proj_customers['id']) self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE = self._scope_request( self.tokens['EMPLOYEE_ASSERTION'], 'project', self.proj_employees['id']) self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN = self._scope_request( self.tokens['ADMIN_ASSERTION'], 'project', self.proj_employees['id']) self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN = self._scope_request( self.tokens['ADMIN_ASSERTION'], 'project', self.proj_customers['id']) self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER = self._scope_request( self.tokens['CUSTOMER_ASSERTION'], 'project', self.proj_employees['id']) self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER = self._scope_request( self.tokens['CUSTOMER_ASSERTION'], 'project', self.project_inherited['id']) self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER = self._scope_request( self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainA['id']) self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request( self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainB['id']) self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER = self._scope_request( self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainD['id']) self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN = self._scope_request( self.tokens['ADMIN_ASSERTION'], 'domain', self.domainA['id']) self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN = self._scope_request( self.tokens['ADMIN_ASSERTION'], 'domain', self.domainB['id']) self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN = self._scope_request( self.tokens['ADMIN_ASSERTION'], 'domain', self.domainC['id']) class FederatedIdentityProviderTests(test_v3.RestfulTestCase): """A test class 
for Identity Providers.""" idp_keys = ['description', 'enabled'] default_body = {'description': None, 'enabled': True} def base_url(self, suffix=None): if suffix is not None: return '/OS-FEDERATION/identity_providers/' + str(suffix) return '/OS-FEDERATION/identity_providers' def _fetch_attribute_from_response(self, resp, parameter, assert_is_not_none=True): """Fetch single attribute from TestResponse object.""" result = resp.result.get(parameter) if assert_is_not_none: self.assertIsNotNone(result) return result def _create_and_decapsulate_response(self, body=None): """Create IdP and fetch it's random id along with entity.""" default_resp = self._create_default_idp(body=body) idp = self._fetch_attribute_from_response(default_resp, 'identity_provider') self.assertIsNotNone(idp) idp_id = idp.get('id') return (idp_id, idp) def _get_idp(self, idp_id): """Fetch IdP entity based on its id.""" url = self.base_url(suffix=idp_id) resp = self.get(url) return resp def _create_default_idp(self, body=None): """Create default IdP.""" url = self.base_url(suffix=uuid.uuid4().hex) if body is None: body = self._http_idp_input() resp = self.put(url, body={'identity_provider': body}, expected_status=http_client.CREATED) return resp def _http_idp_input(self, **kwargs): """Create default input for IdP data.""" body = None if 'body' not in kwargs: body = self.default_body.copy() body['description'] = uuid.uuid4().hex else: body = kwargs['body'] return body def _assign_protocol_to_idp(self, idp_id=None, proto=None, url=None, mapping_id=None, validate=True, **kwargs): if url is None: url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s') if idp_id is None: idp_id, _ = self._create_and_decapsulate_response() if proto is None: proto = uuid.uuid4().hex if mapping_id is None: mapping_id = uuid.uuid4().hex body = {'mapping_id': mapping_id} url = url % {'idp_id': idp_id, 'protocol_id': proto} resp = self.put(url, body={'protocol': body}, **kwargs) if validate: 
self.assertValidResponse(resp, 'protocol', dummy_validator, keys_to_check=['id', 'mapping_id'], ref={'id': proto, 'mapping_id': mapping_id}) return (resp, idp_id, proto) def _get_protocol(self, idp_id, protocol_id): url = "%s/protocols/%s" % (idp_id, protocol_id) url = self.base_url(suffix=url) r = self.get(url) return r def test_create_idp(self): """Creates the IdentityProvider entity associated to remote_ids.""" keys_to_check = list(self.idp_keys) body = self.default_body.copy() body['description'] = uuid.uuid4().hex resp = self._create_default_idp(body=body) self.assertValidResponse(resp, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=body) def test_create_idp_remote(self): """Creates the IdentityProvider entity associated to remote_ids.""" keys_to_check = list(self.idp_keys) keys_to_check.append('remote_ids') body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['remote_ids'] = [uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex] resp = self._create_default_idp(body=body) self.assertValidResponse(resp, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=body) def test_create_idp_remote_repeated(self): """Creates two IdentityProvider entities with some remote_ids A remote_id is the same for both so the second IdP is not created because of the uniqueness of the remote_ids Expect HTTP 409 Conflict code for the latter call. 
""" body = self.default_body.copy() repeated_remote_id = uuid.uuid4().hex body['remote_ids'] = [uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex, repeated_remote_id] self._create_default_idp(body=body) url = self.base_url(suffix=uuid.uuid4().hex) body['remote_ids'] = [uuid.uuid4().hex, repeated_remote_id] resp = self.put(url, body={'identity_provider': body}, expected_status=http_client.CONFLICT) resp_data = jsonutils.loads(resp.body) self.assertIn('Duplicate remote ID', resp_data.get('error', {}).get('message')) def test_create_idp_remote_empty(self): """Creates an IdP with empty remote_ids.""" keys_to_check = list(self.idp_keys) keys_to_check.append('remote_ids') body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['remote_ids'] = [] resp = self._create_default_idp(body=body) self.assertValidResponse(resp, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=body) def test_create_idp_remote_none(self): """Creates an IdP with a None remote_ids.""" keys_to_check = list(self.idp_keys) keys_to_check.append('remote_ids') body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['remote_ids'] = None resp = self._create_default_idp(body=body) expected = body.copy() expected['remote_ids'] = [] self.assertValidResponse(resp, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=expected) def test_update_idp_remote_ids(self): """Update IdP's remote_ids parameter.""" body = self.default_body.copy() body['remote_ids'] = [uuid.uuid4().hex] default_resp = self._create_default_idp(body=body) default_idp = self._fetch_attribute_from_response(default_resp, 'identity_provider') idp_id = default_idp.get('id') url = self.base_url(suffix=idp_id) self.assertIsNotNone(idp_id) body['remote_ids'] = [uuid.uuid4().hex, uuid.uuid4().hex] body = {'identity_provider': body} resp = self.patch(url, body=body) updated_idp = self._fetch_attribute_from_response(resp, 'identity_provider') body = 
body['identity_provider'] self.assertEqual(sorted(body['remote_ids']), sorted(updated_idp.get('remote_ids'))) resp = self.get(url) returned_idp = self._fetch_attribute_from_response(resp, 'identity_provider') self.assertEqual(sorted(body['remote_ids']), sorted(returned_idp.get('remote_ids'))) def test_update_idp_clean_remote_ids(self): """Update IdP's remote_ids parameter with an empty list.""" body = self.default_body.copy() body['remote_ids'] = [uuid.uuid4().hex] default_resp = self._create_default_idp(body=body) default_idp = self._fetch_attribute_from_response(default_resp, 'identity_provider') idp_id = default_idp.get('id') url = self.base_url(suffix=idp_id) self.assertIsNotNone(idp_id) body['remote_ids'] = [] body = {'identity_provider': body} resp = self.patch(url, body=body) updated_idp = self._fetch_attribute_from_response(resp, 'identity_provider') body = body['identity_provider'] self.assertEqual(sorted(body['remote_ids']), sorted(updated_idp.get('remote_ids'))) resp = self.get(url) returned_idp = self._fetch_attribute_from_response(resp, 'identity_provider') self.assertEqual(sorted(body['remote_ids']), sorted(returned_idp.get('remote_ids'))) def test_list_idps(self, iterations=5): """Lists all available IdentityProviders. This test collects ids of created IdPs and intersects it with the list of all available IdPs. List of all IdPs can be a superset of IdPs created in this test, because other tests also create IdPs. 
""" def get_id(resp): r = self._fetch_attribute_from_response(resp, 'identity_provider') return r.get('id') ids = [] for _ in range(iterations): id = get_id(self._create_default_idp()) ids.append(id) ids = set(ids) keys_to_check = self.idp_keys url = self.base_url() resp = self.get(url) self.assertValidListResponse(resp, 'identity_providers', dummy_validator, keys_to_check=keys_to_check) entities = self._fetch_attribute_from_response(resp, 'identity_providers') entities_ids = set([e['id'] for e in entities]) ids_intersection = entities_ids.intersection(ids) self.assertEqual(ids_intersection, ids) def test_filter_list_idp_by_id(self): def get_id(resp): r = self._fetch_attribute_from_response(resp, 'identity_provider') return r.get('id') idp1_id = get_id(self._create_default_idp()) idp2_id = get_id(self._create_default_idp()) # list the IdP, should get two IdP. url = self.base_url() resp = self.get(url) entities = self._fetch_attribute_from_response(resp, 'identity_providers') entities_ids = [e['id'] for e in entities] self.assertItemsEqual(entities_ids, [idp1_id, idp2_id]) # filter the IdP by ID. url = self.base_url() + '?id=' + idp1_id resp = self.get(url) filtered_service_list = resp.json['identity_providers'] self.assertThat(filtered_service_list, matchers.HasLength(1)) self.assertEqual(idp1_id, filtered_service_list[0].get('id')) def test_filter_list_idp_by_enabled(self): def get_id(resp): r = self._fetch_attribute_from_response(resp, 'identity_provider') return r.get('id') idp1_id = get_id(self._create_default_idp()) body = self.default_body.copy() body['enabled'] = False idp2_id = get_id(self._create_default_idp(body=body)) # list the IdP, should get two IdP. url = self.base_url() resp = self.get(url) entities = self._fetch_attribute_from_response(resp, 'identity_providers') entities_ids = [e['id'] for e in entities] self.assertItemsEqual(entities_ids, [idp1_id, idp2_id]) # filter the IdP by 'enabled'. 
url = self.base_url() + '?enabled=True' resp = self.get(url) filtered_service_list = resp.json['identity_providers'] self.assertThat(filtered_service_list, matchers.HasLength(1)) self.assertEqual(idp1_id, filtered_service_list[0].get('id')) def test_check_idp_uniqueness(self): """Add same IdP twice. Expect HTTP 409 Conflict code for the latter call. """ url = self.base_url(suffix=uuid.uuid4().hex) body = self._http_idp_input() self.put(url, body={'identity_provider': body}, expected_status=http_client.CREATED) resp = self.put(url, body={'identity_provider': body}, expected_status=http_client.CONFLICT) resp_data = jsonutils.loads(resp.body) self.assertIn('Duplicate entry', resp_data.get('error', {}).get('message')) def test_get_idp(self): """Create and later fetch IdP.""" body = self._http_idp_input() default_resp = self._create_default_idp(body=body) default_idp = self._fetch_attribute_from_response(default_resp, 'identity_provider') idp_id = default_idp.get('id') url = self.base_url(suffix=idp_id) resp = self.get(url) self.assertValidResponse(resp, 'identity_provider', dummy_validator, keys_to_check=body.keys(), ref=body) def test_get_nonexisting_idp(self): """Fetch nonexisting IdP entity. Expected HTTP 404 Not Found status code. """ idp_id = uuid.uuid4().hex self.assertIsNotNone(idp_id) url = self.base_url(suffix=idp_id) self.get(url, expected_status=http_client.NOT_FOUND) def test_delete_existing_idp(self): """Create and later delete IdP. Expect HTTP 404 Not Found for the GET IdP call. 
""" default_resp = self._create_default_idp() default_idp = self._fetch_attribute_from_response(default_resp, 'identity_provider') idp_id = default_idp.get('id') self.assertIsNotNone(idp_id) url = self.base_url(suffix=idp_id) self.delete(url) self.get(url, expected_status=http_client.NOT_FOUND) def test_delete_idp_also_deletes_assigned_protocols(self): """Deleting an IdP will delete its assigned protocol.""" # create default IdP default_resp = self._create_default_idp() default_idp = self._fetch_attribute_from_response(default_resp, 'identity_provider') idp_id = default_idp['id'] protocol_id = uuid.uuid4().hex url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s') idp_url = self.base_url(suffix=idp_id) # assign protocol to IdP kwargs = {'expected_status': http_client.CREATED} resp, idp_id, proto = self._assign_protocol_to_idp( url=url, idp_id=idp_id, proto=protocol_id, **kwargs) # removing IdP will remove the assigned protocol as well self.assertEqual(1, len(self.federation_api.list_protocols(idp_id))) self.delete(idp_url) self.get(idp_url, expected_status=http_client.NOT_FOUND) self.assertEqual(0, len(self.federation_api.list_protocols(idp_id))) def test_delete_nonexisting_idp(self): """Delete nonexisting IdP. Expect HTTP 404 Not Found for the GET IdP call. 
""" idp_id = uuid.uuid4().hex url = self.base_url(suffix=idp_id) self.delete(url, expected_status=http_client.NOT_FOUND) def test_update_idp_mutable_attributes(self): """Update IdP's mutable parameters.""" default_resp = self._create_default_idp() default_idp = self._fetch_attribute_from_response(default_resp, 'identity_provider') idp_id = default_idp.get('id') url = self.base_url(suffix=idp_id) self.assertIsNotNone(idp_id) _enabled = not default_idp.get('enabled') body = {'remote_ids': [uuid.uuid4().hex, uuid.uuid4().hex], 'description': uuid.uuid4().hex, 'enabled': _enabled} body = {'identity_provider': body} resp = self.patch(url, body=body) updated_idp = self._fetch_attribute_from_response(resp, 'identity_provider') body = body['identity_provider'] for key in body.keys(): if isinstance(body[key], list): self.assertEqual(sorted(body[key]), sorted(updated_idp.get(key))) else: self.assertEqual(body[key], updated_idp.get(key)) resp = self.get(url) updated_idp = self._fetch_attribute_from_response(resp, 'identity_provider') for key in body.keys(): if isinstance(body[key], list): self.assertEqual(sorted(body[key]), sorted(updated_idp.get(key))) else: self.assertEqual(body[key], updated_idp.get(key)) def test_update_idp_immutable_attributes(self): """Update IdP's immutable parameters. Expect HTTP BAD REQUEST. """ default_resp = self._create_default_idp() default_idp = self._fetch_attribute_from_response(default_resp, 'identity_provider') idp_id = default_idp.get('id') self.assertIsNotNone(idp_id) body = self._http_idp_input() body['id'] = uuid.uuid4().hex body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex] url = self.base_url(suffix=idp_id) self.patch(url, body={'identity_provider': body}, expected_status=http_client.BAD_REQUEST) def test_update_nonexistent_idp(self): """Update nonexistent IdP Expect HTTP 404 Not Found code. 
""" idp_id = uuid.uuid4().hex url = self.base_url(suffix=idp_id) body = self._http_idp_input() body['enabled'] = False body = {'identity_provider': body} self.patch(url, body=body, expected_status=http_client.NOT_FOUND) def test_assign_protocol_to_idp(self): """Assign a protocol to existing IdP.""" self._assign_protocol_to_idp(expected_status=http_client.CREATED) def test_protocol_composite_pk(self): """Test that Keystone can add two entities. The entities have identical names, however, attached to different IdPs. 1. Add IdP and assign it protocol with predefined name 2. Add another IdP and assign it a protocol with same name. Expect HTTP 201 code """ url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s') kwargs = {'expected_status': http_client.CREATED} self._assign_protocol_to_idp(proto='saml2', url=url, **kwargs) self._assign_protocol_to_idp(proto='saml2', url=url, **kwargs) def test_protocol_idp_pk_uniqueness(self): """Test whether Keystone checks for unique idp/protocol values. Add same protocol twice, expect Keystone to reject a latter call and return HTTP 409 Conflict code. """ url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s') kwargs = {'expected_status': http_client.CREATED} resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2', url=url, **kwargs) kwargs = {'expected_status': http_client.CONFLICT} resp, idp_id, proto = self._assign_protocol_to_idp(idp_id=idp_id, proto='saml2', validate=False, url=url, **kwargs) def test_assign_protocol_to_nonexistent_idp(self): """Assign protocol to IdP that doesn't exist. Expect HTTP 404 Not Found code. 
""" idp_id = uuid.uuid4().hex kwargs = {'expected_status': http_client.NOT_FOUND} self._assign_protocol_to_idp(proto='saml2', idp_id=idp_id, validate=False, **kwargs) def test_get_protocol(self): """Create and later fetch protocol tied to IdP.""" resp, idp_id, proto = self._assign_protocol_to_idp( expected_status=http_client.CREATED) proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id'] url = "%s/protocols/%s" % (idp_id, proto_id) url = self.base_url(suffix=url) resp = self.get(url) reference = {'id': proto_id} self.assertValidResponse(resp, 'protocol', dummy_validator, keys_to_check=reference.keys(), ref=reference) def test_list_protocols(self): """Create set of protocols and later list them. Compare input and output id sets. """ resp, idp_id, proto = self._assign_protocol_to_idp( expected_status=http_client.CREATED) iterations = random.randint(0, 16) protocol_ids = [] for _ in range(iterations): resp, _, proto = self._assign_protocol_to_idp( idp_id=idp_id, expected_status=http_client.CREATED) proto_id = self._fetch_attribute_from_response(resp, 'protocol') proto_id = proto_id['id'] protocol_ids.append(proto_id) url = "%s/protocols" % idp_id url = self.base_url(suffix=url) resp = self.get(url) self.assertValidListResponse(resp, 'protocols', dummy_validator, keys_to_check=['id']) entities = self._fetch_attribute_from_response(resp, 'protocols') entities = set([entity['id'] for entity in entities]) protocols_intersection = entities.intersection(protocol_ids) self.assertEqual(protocols_intersection, set(protocol_ids)) def test_update_protocols_attribute(self): """Update protocol's attribute.""" resp, idp_id, proto = self._assign_protocol_to_idp( expected_status=http_client.CREATED) new_mapping_id = uuid.uuid4().hex url = "%s/protocols/%s" % (idp_id, proto) url = self.base_url(suffix=url) body = {'mapping_id': new_mapping_id} resp = self.patch(url, body={'protocol': body}) self.assertValidResponse(resp, 'protocol', dummy_validator, 
keys_to_check=['id', 'mapping_id'], ref={'id': proto, 'mapping_id': new_mapping_id} ) def test_delete_protocol(self): """Delete protocol. Expect HTTP 404 Not Found code for the GET call after the protocol is deleted. """ url = self.base_url(suffix='/%(idp_id)s/' 'protocols/%(protocol_id)s') resp, idp_id, proto = self._assign_protocol_to_idp( expected_status=http_client.CREATED) url = url % {'idp_id': idp_id, 'protocol_id': proto} self.delete(url) self.get(url, expected_status=http_client.NOT_FOUND) class MappingCRUDTests(test_v3.RestfulTestCase): """A class for testing CRUD operations for Mappings.""" MAPPING_URL = '/OS-FEDERATION/mappings/' def assertValidMappingListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'mappings', self.assertValidMapping, keys_to_check=[], *args, **kwargs) def assertValidMappingResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'mapping', self.assertValidMapping, keys_to_check=[], *args, **kwargs) def assertValidMapping(self, entity, ref=None): self.assertIsNotNone(entity.get('id')) self.assertIsNotNone(entity.get('rules')) if ref: self.assertEqual(entity['rules'], ref['rules']) return entity def _create_default_mapping_entry(self): url = self.MAPPING_URL + uuid.uuid4().hex resp = self.put(url, body={'mapping': mapping_fixtures.MAPPING_LARGE}, expected_status=http_client.CREATED) return resp def _get_id_from_response(self, resp): r = resp.result.get('mapping') return r.get('id') def test_mapping_create(self): resp = self._create_default_mapping_entry() self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE) def test_mapping_list(self): url = self.MAPPING_URL self._create_default_mapping_entry() resp = self.get(url) entities = resp.result.get('mappings') self.assertIsNotNone(entities) self.assertResponseStatus(resp, http_client.OK) self.assertValidListLinks(resp.result.get('links')) self.assertEqual(1, len(entities)) def test_mapping_delete(self): url = 
self.MAPPING_URL + '%(mapping_id)s' resp = self._create_default_mapping_entry() mapping_id = self._get_id_from_response(resp) url = url % {'mapping_id': str(mapping_id)} resp = self.delete(url) self.assertResponseStatus(resp, http_client.NO_CONTENT) self.get(url, expected_status=http_client.NOT_FOUND) def test_mapping_get(self): url = self.MAPPING_URL + '%(mapping_id)s' resp = self._create_default_mapping_entry() mapping_id = self._get_id_from_response(resp) url = url % {'mapping_id': mapping_id} resp = self.get(url) self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE) def test_mapping_update(self): url = self.MAPPING_URL + '%(mapping_id)s' resp = self._create_default_mapping_entry() mapping_id = self._get_id_from_response(resp) url = url % {'mapping_id': mapping_id} resp = self.patch(url, body={'mapping': mapping_fixtures.MAPPING_SMALL}) self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL) resp = self.get(url) self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL) def test_delete_mapping_dne(self): url = self.MAPPING_URL + uuid.uuid4().hex self.delete(url, expected_status=http_client.NOT_FOUND) def test_get_mapping_dne(self): url = self.MAPPING_URL + uuid.uuid4().hex self.get(url, expected_status=http_client.NOT_FOUND) def test_create_mapping_bad_requirements(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_BAD_REQ}) def test_create_mapping_no_rules(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_NO_RULES}) def test_create_mapping_no_remote_objects(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE}) def test_create_mapping_bad_value(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put(url, 
expected_status=http_client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE}) def test_create_mapping_missing_local(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL}) def test_create_mapping_missing_type(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE}) def test_create_mapping_wrong_type(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE}) def test_create_mapping_extra_remote_properties_not_any_of(self): url = self.MAPPING_URL + uuid.uuid4().hex mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': mapping}) def test_create_mapping_extra_remote_properties_any_one_of(self): url = self.MAPPING_URL + uuid.uuid4().hex mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': mapping}) def test_create_mapping_extra_remote_properties_just_type(self): url = self.MAPPING_URL + uuid.uuid4().hex mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': mapping}) def test_create_mapping_empty_map(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': {}}) def test_create_mapping_extra_rules_properties(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS}) def test_create_mapping_with_blacklist_and_whitelist(self): """Test for adding whitelist and blacklist in the rule Server should respond with HTTP 400 Bad Request error upon 
discovering both ``whitelist`` and ``blacklist`` keywords in the same rule. """ url = self.MAPPING_URL + uuid.uuid4().hex mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_AND_BLACKLIST self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': mapping}) def test_create_mapping_with_local_user_and_local_domain(self): url = self.MAPPING_URL + uuid.uuid4().hex resp = self.put( url, body={ 'mapping': mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN }, expected_status=http_client.CREATED) self.assertValidMappingResponse( resp, mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN) def test_create_mapping_with_ephemeral(self): url = self.MAPPING_URL + uuid.uuid4().hex resp = self.put( url, body={'mapping': mapping_fixtures.MAPPING_EPHEMERAL_USER}, expected_status=http_client.CREATED) self.assertValidMappingResponse( resp, mapping_fixtures.MAPPING_EPHEMERAL_USER) def test_create_mapping_with_bad_user_type(self): url = self.MAPPING_URL + uuid.uuid4().hex # get a copy of a known good map bad_mapping = copy.deepcopy(mapping_fixtures.MAPPING_EPHEMERAL_USER) # now sabotage the user type bad_mapping['rules'][0]['local'][0]['user']['type'] = uuid.uuid4().hex self.put(url, expected_status=http_client.BAD_REQUEST, body={'mapping': bad_mapping}) class FederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin): def auth_plugin_config_override(self): methods = ['saml2'] super(FederatedTokenTests, self).auth_plugin_config_override(methods) def setUp(self): super(FederatedTokenTests, self).setUp() self._notifications = [] def fake_saml_notify(action, context, user_id, group_ids, identity_provider, protocol, token_id, outcome): note = { 'action': action, 'user_id': user_id, 'identity_provider': identity_provider, 'protocol': protocol, 'send_notification_called': True} self._notifications.append(note) self.useFixture(mockpatch.PatchObject( notifications, 'send_saml_audit_notification', fake_saml_notify)) def _assert_last_notify(self, action, identity_provider, protocol, 
user_id=None): self.assertTrue(self._notifications) note = self._notifications[-1] if user_id: self.assertEqual(note['user_id'], user_id) self.assertEqual(note['action'], action) self.assertEqual(note['identity_provider'], identity_provider) self.assertEqual(note['protocol'], protocol) self.assertTrue(note['send_notification_called']) def load_fixtures(self, fixtures): super(FederatedTokenTests, self).load_fixtures(fixtures) self.load_federation_sample_data() def test_issue_unscoped_token_notify(self): self._issue_unscoped_token() self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL) def test_issue_unscoped_token(self): r = self._issue_unscoped_token() self.assertIsNotNone(r.headers.get('X-Subject-Token')) self.assertValidMappedUser(r.json['token']) def test_issue_unscoped_token_disabled_idp(self): """Checks if authentication works with disabled identity providers. Test plan: 1) Disable default IdP 2) Try issuing unscoped token for that IdP 3) Expect server to forbid authentication """ enabled_false = {'enabled': False} self.federation_api.update_idp(self.IDP, enabled_false) self.assertRaises(exception.Forbidden, self._issue_unscoped_token) def test_issue_unscoped_token_group_names_in_mapping(self): r = self._issue_unscoped_token(assertion='ANOTHER_CUSTOMER_ASSERTION') ref_groups = set([self.group_customers['id'], self.group_admins['id']]) token_resp = r.json_body token_groups = token_resp['token']['user']['OS-FEDERATION']['groups'] token_groups = set([group['id'] for group in token_groups]) self.assertEqual(ref_groups, token_groups) def test_issue_unscoped_tokens_nonexisting_group(self): self.assertRaises(exception.MissingGroups, self._issue_unscoped_token, assertion='ANOTHER_TESTER_ASSERTION') def test_issue_unscoped_token_with_remote_no_attribute(self): r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE, environment={ self.REMOTE_ID_ATTR: self.REMOTE_IDS[0] }) self.assertIsNotNone(r.headers.get('X-Subject-Token')) def 
test_issue_unscoped_token_with_remote(self): self.config_fixture.config(group='federation', remote_id_attribute=self.REMOTE_ID_ATTR) r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE, environment={ self.REMOTE_ID_ATTR: self.REMOTE_IDS[0] }) self.assertIsNotNone(r.headers.get('X-Subject-Token')) def test_issue_unscoped_token_with_saml2_remote(self): self.config_fixture.config(group='saml2', remote_id_attribute=self.REMOTE_ID_ATTR) r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE, environment={ self.REMOTE_ID_ATTR: self.REMOTE_IDS[0] }) self.assertIsNotNone(r.headers.get('X-Subject-Token')) def test_issue_unscoped_token_with_remote_different(self): self.config_fixture.config(group='federation', remote_id_attribute=self.REMOTE_ID_ATTR) self.assertRaises(exception.Forbidden, self._issue_unscoped_token, idp=self.IDP_WITH_REMOTE, environment={ self.REMOTE_ID_ATTR: uuid.uuid4().hex }) def test_issue_unscoped_token_with_remote_default_overwritten(self): """Test that protocol remote_id_attribute has higher priority. Make sure the parameter stored under ``protocol`` section has higher priority over parameter from default ``federation`` configuration section. 
""" self.config_fixture.config(group='saml2', remote_id_attribute=self.REMOTE_ID_ATTR) self.config_fixture.config(group='federation', remote_id_attribute=uuid.uuid4().hex) r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE, environment={ self.REMOTE_ID_ATTR: self.REMOTE_IDS[0] }) self.assertIsNotNone(r.headers.get('X-Subject-Token')) def test_issue_unscoped_token_with_remote_unavailable(self): self.config_fixture.config(group='federation', remote_id_attribute=self.REMOTE_ID_ATTR) self.assertRaises(exception.Unauthorized, self._issue_unscoped_token, idp=self.IDP_WITH_REMOTE, environment={ uuid.uuid4().hex: uuid.uuid4().hex }) def test_issue_unscoped_token_with_remote_user_as_empty_string(self): # make sure that REMOTE_USER set as the empty string won't interfere r = self._issue_unscoped_token(environment={'REMOTE_USER': ''}) self.assertIsNotNone(r.headers.get('X-Subject-Token')) def test_issue_unscoped_token_no_groups(self): self.assertRaises(exception.Unauthorized, self._issue_unscoped_token, assertion='BAD_TESTER_ASSERTION') def test_issue_unscoped_token_malformed_environment(self): """Test whether non string objects are filtered out. Put non string objects into the environment, inject correct assertion and try to get an unscoped token. Expect server not to fail on using split() method on non string objects and return token id in the HTTP header. 
""" api = auth_controllers.Auth() context = { 'environment': { 'malformed_object': object(), 'another_bad_idea': tuple(range(10)), 'yet_another_bad_param': dict(zip(uuid.uuid4().hex, range(32))) } } self._inject_assertion(context, 'EMPLOYEE_ASSERTION') r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ) self.assertIsNotNone(r.headers.get('X-Subject-Token')) def test_scope_to_project_once_notify(self): r = self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE) user_id = r.json['token']['user']['id'] self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL, user_id) def test_scope_to_project_once(self): r = self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE) token_resp = r.result['token'] project_id = token_resp['project']['id'] self._check_project_scoped_token_attributes(token_resp, project_id) roles_ref = [self.role_employee] projects_ref = self.proj_employees self._check_projects_and_roles(token_resp, roles_ref, projects_ref) self.assertValidMappedUser(token_resp) def test_scope_token_with_idp_disabled(self): """Scope token issued by disabled IdP. Try scoping the token issued by an IdP which is disabled now. Expect server to refuse scoping operation. This test confirms correct behaviour when IdP was enabled and unscoped token was issued, but disabled before user tries to scope the token. Here we assume the unscoped token was already issued and start from the moment where IdP is being disabled and unscoped token is being used. 
Test plan: 1) Disable IdP 2) Try scoping unscoped token """ enabled_false = {'enabled': False} self.federation_api.update_idp(self.IDP, enabled_false) self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER, expected_status=http_client.FORBIDDEN) def test_scope_to_bad_project(self): """Scope unscoped token with a project we don't have access to.""" self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER, expected_status=http_client.UNAUTHORIZED) def test_scope_to_project_multiple_times(self): """Try to scope the unscoped token multiple times. The new tokens should be scoped to: * Customers' project * Employees' project """ bodies = (self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN, self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN) project_ids = (self.proj_employees['id'], self.proj_customers['id']) for body, project_id_ref in zip(bodies, project_ids): r = self.v3_create_token(body) token_resp = r.result['token'] self._check_project_scoped_token_attributes(token_resp, project_id_ref) def test_scope_to_project_with_only_inherited_roles(self): """Try to scope token whose only roles are inherited.""" self.config_fixture.config(group='os_inherit', enabled=True) r = self.v3_create_token( self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER) token_resp = r.result['token'] self._check_project_scoped_token_attributes( token_resp, self.project_inherited['id']) roles_ref = [self.role_customer] projects_ref = self.project_inherited self._check_projects_and_roles(token_resp, roles_ref, projects_ref) self.assertValidMappedUser(token_resp) def test_scope_token_from_nonexistent_unscoped_token(self): """Try to scope token from non-existent unscoped token.""" self.v3_create_token( self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN, expected_status=http_client.NOT_FOUND) def test_issue_token_from_rules_without_user(self): api = auth_controllers.Auth() context = {'environment': {}} self._inject_assertion(context, 'BAD_TESTER_ASSERTION') 
self.assertRaises(exception.Unauthorized, api.authenticate_for_token, context, self.UNSCOPED_V3_SAML2_REQ) def test_issue_token_with_nonexistent_group(self): """Inject assertion that matches rule issuing bad group id. Expect server to find out that some groups are missing in the backend and raise exception.MappedGroupNotFound exception. """ self.assertRaises(exception.MappedGroupNotFound, self._issue_unscoped_token, assertion='CONTRACTOR_ASSERTION') def test_scope_to_domain_once(self): r = self.v3_create_token(self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER) token_resp = r.result['token'] self._check_domain_scoped_token_attributes(token_resp, self.domainA['id']) def test_scope_to_domain_multiple_tokens(self): """Issue multiple tokens scoping to different domains. The new tokens should be scoped to: * domainA * domainB * domainC """ bodies = (self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN, self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN, self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN) domain_ids = (self.domainA['id'], self.domainB['id'], self.domainC['id']) for body, domain_id_ref in zip(bodies, domain_ids): r = self.v3_create_token(body) token_resp = r.result['token'] self._check_domain_scoped_token_attributes(token_resp, domain_id_ref) def test_scope_to_domain_with_only_inherited_roles_fails(self): """Try to scope to a domain that has no direct roles.""" self.v3_create_token( self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER, expected_status=http_client.UNAUTHORIZED) def test_list_projects(self): urls = ('/OS-FEDERATION/projects', '/auth/projects') token = (self.tokens['CUSTOMER_ASSERTION'], self.tokens['EMPLOYEE_ASSERTION'], self.tokens['ADMIN_ASSERTION']) self.config_fixture.config(group='os_inherit', enabled=True) projects_refs = (set([self.proj_customers['id'], self.project_inherited['id']]), set([self.proj_employees['id'], self.project_all['id']]), set([self.proj_employees['id'], self.project_all['id'], self.proj_customers['id'], self.project_inherited['id']])) for token, projects_ref in zip(token, 
projects_refs): for url in urls: r = self.get(url, token=token) projects_resp = r.result['projects'] projects = set(p['id'] for p in projects_resp) self.assertEqual(projects_ref, projects, 'match failed for url %s' % url) # TODO(samueldmq): Create another test class for role inheritance tests. # The advantage would be to reduce the complexity of this test class and # have tests specific to this functionality grouped, easing readability and # maintenability. def test_list_projects_for_inherited_project_assignment(self): # Enable os_inherit extension self.config_fixture.config(group='os_inherit', enabled=True) # Create a subproject subproject_inherited = unit.new_project_ref( domain_id=self.domainD['id'], parent_id=self.project_inherited['id']) self.resource_api.create_project(subproject_inherited['id'], subproject_inherited) # Create an inherited role assignment self.assignment_api.create_grant( role_id=self.role_employee['id'], group_id=self.group_employees['id'], project_id=self.project_inherited['id'], inherited_to_projects=True) # Define expected projects from employee assertion, which contain # the created subproject expected_project_ids = [self.project_all['id'], self.proj_employees['id'], subproject_inherited['id']] # Assert expected projects for both available URLs for url in ('/OS-FEDERATION/projects', '/auth/projects'): r = self.get(url, token=self.tokens['EMPLOYEE_ASSERTION']) project_ids = [project['id'] for project in r.result['projects']] self.assertEqual(len(expected_project_ids), len(project_ids)) for expected_project_id in expected_project_ids: self.assertIn(expected_project_id, project_ids, 'Projects match failed for url %s' % url) def test_list_domains(self): urls = ('/OS-FEDERATION/domains', '/auth/domains') tokens = (self.tokens['CUSTOMER_ASSERTION'], self.tokens['EMPLOYEE_ASSERTION'], self.tokens['ADMIN_ASSERTION']) # NOTE(henry-nash): domain D does not appear in the expected results # since it only had inherited roles (which only apply to 
projects # within the domain) domain_refs = (set([self.domainA['id']]), set([self.domainA['id'], self.domainB['id']]), set([self.domainA['id'], self.domainB['id'], self.domainC['id']])) for token, domains_ref in zip(tokens, domain_refs): for url in urls: r = self.get(url, token=token) domains_resp = r.result['domains'] domains = set(p['id'] for p in domains_resp) self.assertEqual(domains_ref, domains, 'match failed for url %s' % url) @utils.wip('This will fail because of bug #1501032. The returned method' 'list should contain "saml2". This is documented in bug ' '1501032.') def test_full_workflow(self): """Test 'standard' workflow for granting access tokens. * Issue unscoped token * List available projects based on groups * Scope token to one of available projects """ r = self._issue_unscoped_token() token_resp = r.json_body['token'] # NOTE(lbragstad): Ensure only 'saml2' is in the method list. self.assertListEqual(['saml2'], token_resp['methods']) self.assertValidMappedUser(token_resp) employee_unscoped_token_id = r.headers.get('X-Subject-Token') r = self.get('/auth/projects', token=employee_unscoped_token_id) projects = r.result['projects'] random_project = random.randint(0, len(projects)) - 1 project = projects[random_project] v3_scope_request = self._scope_request(employee_unscoped_token_id, 'project', project['id']) r = self.v3_create_token(v3_scope_request) token_resp = r.result['token'] # FIXME(lbragstad): 'token' should be in the list of methods returned # but it isn't. This is documented in bug 1501032. self.assertIn('token', token_resp['methods']) self.assertIn('saml2', token_resp['methods']) self._check_project_scoped_token_attributes(token_resp, project['id']) def test_workflow_with_groups_deletion(self): """Test full workflow with groups deletion before token scoping. 
The test scenario is as follows: - Create group ``group`` - Create and assign roles to ``group`` and ``project_all`` - Patch mapping rules for existing IdP so it issues group id - Issue unscoped token with ``group``'s id - Delete group ``group`` - Scope token to ``project_all`` - Expect HTTP 500 response """ # create group and role group = unit.new_group_ref(domain_id=self.domainA['id']) group = self.identity_api.create_group(group) role = unit.new_role_ref() self.role_api.create_role(role['id'], role) # assign role to group and project_admins self.assignment_api.create_grant(role['id'], group_id=group['id'], project_id=self.project_all['id']) rules = { 'rules': [ { 'local': [ { 'group': { 'id': group['id'] } }, { 'user': { 'name': '{0}' } } ], 'remote': [ { 'type': 'UserName' }, { 'type': 'LastName', 'any_one_of': [ 'Account' ] } ] } ] } self.federation_api.update_mapping(self.mapping['id'], rules) r = self._issue_unscoped_token(assertion='TESTER_ASSERTION') token_id = r.headers.get('X-Subject-Token') # delete group self.identity_api.delete_group(group['id']) # scope token to project_all, expect HTTP 500 scoped_token = self._scope_request( token_id, 'project', self.project_all['id']) self.v3_create_token( scoped_token, expected_status=http_client.INTERNAL_SERVER_ERROR) def test_lists_with_missing_group_in_backend(self): """Test a mapping that points to a group that does not exist For explicit mappings, we expect the group to exist in the backend, but for lists, specifically blacklists, a missing group is expected as many groups will be specified by the IdP that are not Keystone groups. 
The test scenario is as follows: - Create group ``EXISTS`` - Set mapping rules for existing IdP with a blacklist that passes through as REMOTE_USER_GROUPS - Issue unscoped token with on group ``EXISTS`` id in it """ domain_id = self.domainA['id'] domain_name = self.domainA['name'] group = unit.new_group_ref(domain_id=domain_id, name='EXISTS') group = self.identity_api.create_group(group) rules = { 'rules': [ { "local": [ { "user": { "name": "{0}", "id": "{0}" } } ], "remote": [ { "type": "REMOTE_USER" } ] }, { "local": [ { "groups": "{0}", "domain": {"name": domain_name} } ], "remote": [ { "type": "REMOTE_USER_GROUPS", } ] } ] } self.federation_api.update_mapping(self.mapping['id'], rules) def test_empty_blacklist_passess_all_values(self): """Test a mapping with empty blacklist specified Not adding a ``blacklist`` keyword to the mapping rules has the same effect as adding an empty ``blacklist``. In both cases, the mapping engine will not discard any groups that are associated with apache environment variables. This test checks scenario where an empty blacklist was specified. Expected result is to allow any value. 
The test scenario is as follows: - Create group ``EXISTS`` - Create group ``NO_EXISTS`` - Set mapping rules for existing IdP with a blacklist that passes through as REMOTE_USER_GROUPS - Issue unscoped token with groups ``EXISTS`` and ``NO_EXISTS`` assigned """ domain_id = self.domainA['id'] domain_name = self.domainA['name'] # Add a group "EXISTS" group_exists = unit.new_group_ref(domain_id=domain_id, name='EXISTS') group_exists = self.identity_api.create_group(group_exists) # Add a group "NO_EXISTS" group_no_exists = unit.new_group_ref(domain_id=domain_id, name='NO_EXISTS') group_no_exists = self.identity_api.create_group(group_no_exists) group_ids = set([group_exists['id'], group_no_exists['id']]) rules = { 'rules': [ { "local": [ { "user": { "name": "{0}", "id": "{0}" } } ], "remote": [ { "type": "REMOTE_USER" } ] }, { "local": [ { "groups": "{0}", "domain": {"name": domain_name} } ], "remote": [ { "type": "REMOTE_USER_GROUPS", "blacklist": [] } ] } ] } self.federation_api.update_mapping(self.mapping['id'], rules) r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION') assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups'] self.assertEqual(len(group_ids), len(assigned_group_ids)) for group in assigned_group_ids: self.assertIn(group['id'], group_ids) def test_not_adding_blacklist_passess_all_values(self): """Test a mapping without blacklist specified. Not adding a ``blacklist`` keyword to the mapping rules has the same effect as adding an empty ``blacklist``. In both cases all values will be accepted and passed. This test checks scenario where an blacklist was not specified. Expected result is to allow any value. 
The test scenario is as follows: - Create group ``EXISTS`` - Create group ``NO_EXISTS`` - Set mapping rules for existing IdP with a blacklist that passes through as REMOTE_USER_GROUPS - Issue unscoped token with on groups ``EXISTS`` and ``NO_EXISTS`` assigned """ domain_id = self.domainA['id'] domain_name = self.domainA['name'] # Add a group "EXISTS" group_exists = unit.new_group_ref(domain_id=domain_id, name='EXISTS') group_exists = self.identity_api.create_group(group_exists) # Add a group "NO_EXISTS" group_no_exists = unit.new_group_ref(domain_id=domain_id, name='NO_EXISTS') group_no_exists = self.identity_api.create_group(group_no_exists) group_ids = set([group_exists['id'], group_no_exists['id']]) rules = { 'rules': [ { "local": [ { "user": { "name": "{0}", "id": "{0}" } } ], "remote": [ { "type": "REMOTE_USER" } ] }, { "local": [ { "groups": "{0}", "domain": {"name": domain_name} } ], "remote": [ { "type": "REMOTE_USER_GROUPS", } ] } ] } self.federation_api.update_mapping(self.mapping['id'], rules) r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION') assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups'] self.assertEqual(len(group_ids), len(assigned_group_ids)) for group in assigned_group_ids: self.assertIn(group['id'], group_ids) def test_empty_whitelist_discards_all_values(self): """Test that empty whitelist blocks all the values Not adding a ``whitelist`` keyword to the mapping value is different than adding empty whitelist. The former case will simply pass all the values, whereas the latter would discard all the values. This test checks scenario where an empty whitelist was specified. The expected result is that no groups are matched. 
The test scenario is as follows: - Create group ``EXISTS`` - Set mapping rules for existing IdP with an empty whitelist that whould discard any values from the assertion - Try issuing unscoped token, expect server to raise ``exception.MissingGroups`` as no groups were matched and ephemeral user does not have any group assigned. """ domain_id = self.domainA['id'] domain_name = self.domainA['name'] group = unit.new_group_ref(domain_id=domain_id, name='EXISTS') group = self.identity_api.create_group(group) rules = { 'rules': [ { "local": [ { "user": { "name": "{0}", "id": "{0}" } } ], "remote": [ { "type": "REMOTE_USER" } ] }, { "local": [ { "groups": "{0}", "domain": {"name": domain_name} } ], "remote": [ { "type": "REMOTE_USER_GROUPS", "whitelist": [] } ] } ] } self.federation_api.update_mapping(self.mapping['id'], rules) self.assertRaises(exception.MissingGroups, self._issue_unscoped_token, assertion='UNMATCHED_GROUP_ASSERTION') def test_not_setting_whitelist_accepts_all_values(self): """Test that not setting whitelist passes Not adding a ``whitelist`` keyword to the mapping value is different than adding empty whitelist. The former case will simply pass all the values, whereas the latter would discard all the values. This test checks a scenario where a ``whitelist`` was not specified. Expected result is that no groups are ignored. The test scenario is as follows: - Create group ``EXISTS`` - Set mapping rules for existing IdP with an empty whitelist that whould discard any values from the assertion - Issue an unscoped token and make sure ephemeral user is a member of two groups. 
""" domain_id = self.domainA['id'] domain_name = self.domainA['name'] # Add a group "EXISTS" group_exists = unit.new_group_ref(domain_id=domain_id, name='EXISTS') group_exists = self.identity_api.create_group(group_exists) # Add a group "NO_EXISTS" group_no_exists = unit.new_group_ref(domain_id=domain_id, name='NO_EXISTS') group_no_exists = self.identity_api.create_group(group_no_exists) group_ids = set([group_exists['id'], group_no_exists['id']]) rules = { 'rules': [ { "local": [ { "user": { "name": "{0}", "id": "{0}" } } ], "remote": [ { "type": "REMOTE_USER" } ] }, { "local": [ { "groups": "{0}", "domain": {"name": domain_name} } ], "remote": [ { "type": "REMOTE_USER_GROUPS", } ] } ] } self.federation_api.update_mapping(self.mapping['id'], rules) r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION') assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups'] self.assertEqual(len(group_ids), len(assigned_group_ids)) for group in assigned_group_ids: self.assertIn(group['id'], group_ids) def test_assertion_prefix_parameter(self): """Test parameters filtering based on the prefix. With ``assertion_prefix`` set to fixed, non default value, issue an unscoped token from assertion EMPLOYEE_ASSERTION_PREFIXED. Expect server to return unscoped token. """ self.config_fixture.config(group='federation', assertion_prefix=self.ASSERTION_PREFIX) r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION_PREFIXED') self.assertIsNotNone(r.headers.get('X-Subject-Token')) def test_assertion_prefix_parameter_expect_fail(self): """Test parameters filtering based on the prefix. With ``assertion_prefix`` default value set to empty string issue an unscoped token from assertion EMPLOYEE_ASSERTION. Next, configure ``assertion_prefix`` to value ``UserName``. Try issuing unscoped token with EMPLOYEE_ASSERTION. Expect server to raise exception.Unathorized exception. 
""" r = self._issue_unscoped_token() self.assertIsNotNone(r.headers.get('X-Subject-Token')) self.config_fixture.config(group='federation', assertion_prefix='UserName') self.assertRaises(exception.Unauthorized, self._issue_unscoped_token) def test_v2_auth_with_federation_token_fails(self): """Test that using a federation token with v2 auth fails. If an admin sets up a federated Keystone environment, and a user incorrectly configures a service (like Nova) to only use v2 auth, the returned message should be informative. """ r = self._issue_unscoped_token() token_id = r.headers.get('X-Subject-Token') self.assertRaises(exception.Unauthorized, self.token_provider_api.validate_v2_token, token_id=token_id) def test_unscoped_token_has_user_domain(self): r = self._issue_unscoped_token() self._check_domains_are_valid(r.json_body['token']) def test_scoped_token_has_user_domain(self): r = self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE) self._check_domains_are_valid(r.result['token']) def test_issue_unscoped_token_for_local_user(self): r = self._issue_unscoped_token(assertion='LOCAL_USER_ASSERTION') token_resp = r.json_body['token'] self.assertListEqual(['saml2'], token_resp['methods']) self.assertEqual(self.user['id'], token_resp['user']['id']) self.assertEqual(self.user['name'], token_resp['user']['name']) self.assertEqual(self.domain['id'], token_resp['user']['domain']['id']) # Make sure the token is not scoped self.assertNotIn('project', token_resp) self.assertNotIn('domain', token_resp) def test_issue_token_for_local_user_user_not_found(self): self.assertRaises(exception.Unauthorized, self._issue_unscoped_token, assertion='ANOTHER_LOCAL_USER_ASSERTION') class FernetFederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin): AUTH_METHOD = 'token' def load_fixtures(self, fixtures): super(FernetFederatedTokenTests, self).load_fixtures(fixtures) self.load_federation_sample_data() def config_overrides(self): super(FernetFederatedTokenTests, 
self).config_overrides() self.config_fixture.config(group='token', provider='fernet') self.useFixture(ksfixtures.KeyRepository(self.config_fixture)) def auth_plugin_config_override(self): methods = ['saml2', 'token', 'password'] super(FernetFederatedTokenTests, self).auth_plugin_config_override(methods) def test_federated_unscoped_token(self): resp = self._issue_unscoped_token() self.assertEqual(204, len(resp.headers['X-Subject-Token'])) self.assertValidMappedUser(resp.json_body['token']) def test_federated_unscoped_token_with_multiple_groups(self): assertion = 'ANOTHER_CUSTOMER_ASSERTION' resp = self._issue_unscoped_token(assertion=assertion) self.assertEqual(226, len(resp.headers['X-Subject-Token'])) self.assertValidMappedUser(resp.json_body['token']) def test_validate_federated_unscoped_token(self): resp = self._issue_unscoped_token() unscoped_token = resp.headers.get('X-Subject-Token') # assert that the token we received is valid self.get('/auth/tokens/', headers={'X-Subject-Token': unscoped_token}) def test_fernet_full_workflow(self): """Test 'standard' workflow for granting Fernet access tokens. * Issue unscoped token * List available projects based on groups * Scope token to one of available projects """ resp = self._issue_unscoped_token() self.assertValidMappedUser(resp.json_body['token']) unscoped_token = resp.headers.get('X-Subject-Token') resp = self.get('/auth/projects', token=unscoped_token) projects = resp.result['projects'] random_project = random.randint(0, len(projects)) - 1 project = projects[random_project] v3_scope_request = self._scope_request(unscoped_token, 'project', project['id']) resp = self.v3_create_token(v3_scope_request) token_resp = resp.result['token'] self._check_project_scoped_token_attributes(token_resp, project['id']) class FederatedTokenTestsMethodToken(FederatedTokenTests): """Test federation operation with unified scoping auth method. 
Test all the operations with auth method set to ``token`` as a new, unified way for scoping all the tokens. """ AUTH_METHOD = 'token' def auth_plugin_config_override(self): methods = ['saml2', 'token'] super(FederatedTokenTests, self).auth_plugin_config_override(methods) @utils.wip('This will fail because of bug #1501032. The returned method' 'list should contain "saml2". This is documented in bug ' '1501032.') def test_full_workflow(self): """Test 'standard' workflow for granting access tokens. * Issue unscoped token * List available projects based on groups * Scope token to one of available projects """ r = self._issue_unscoped_token() token_resp = r.json_body['token'] # NOTE(lbragstad): Ensure only 'saml2' is in the method list. self.assertListEqual(['saml2'], token_resp['methods']) self.assertValidMappedUser(token_resp) employee_unscoped_token_id = r.headers.get('X-Subject-Token') r = self.get('/auth/projects', token=employee_unscoped_token_id) projects = r.result['projects'] random_project = random.randint(0, len(projects)) - 1 project = projects[random_project] v3_scope_request = self._scope_request(employee_unscoped_token_id, 'project', project['id']) r = self.v3_authenticate_token(v3_scope_request) token_resp = r.result['token'] self.assertIn('token', token_resp['methods']) self.assertIn('saml2', token_resp['methods']) self._check_project_scoped_token_attributes(token_resp, project['id']) class FederatedUserTests(test_v3.RestfulTestCase, FederatedSetupMixin): """Tests for federated users Tests new shadow users functionality """ def auth_plugin_config_override(self): methods = ['saml2'] super(FederatedUserTests, self).auth_plugin_config_override(methods) def setUp(self): super(FederatedUserTests, self).setUp() def load_fixtures(self, fixtures): super(FederatedUserTests, self).load_fixtures(fixtures) self.load_federation_sample_data() def test_user_id_persistense(self): """Ensure user_id is persistend for multiple federated authn calls.""" r = 
self._issue_unscoped_token() user_id = r.json_body['token']['user']['id'] r = self._issue_unscoped_token() user_id2 = r.json_body['token']['user']['id'] self.assertEqual(user_id, user_id2) class JsonHomeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin): JSON_HOME_DATA = { 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/' '1.0/rel/identity_provider': { 'href-template': '/OS-FEDERATION/identity_providers/{idp_id}', 'href-vars': { 'idp_id': 'http://docs.openstack.org/api/openstack-identity/3/' 'ext/OS-FEDERATION/1.0/param/idp_id' }, }, } def _is_xmlsec1_installed(): p = subprocess.Popen( ['which', 'xmlsec1'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # invert the return code return not bool(p.wait()) def _load_xml(filename): with open(os.path.join(XMLDIR, filename), 'r') as xml: return xml.read() class SAMLGenerationTests(test_v3.RestfulTestCase): SP_AUTH_URL = ('http://beta.com:5000/v3/OS-FEDERATION/identity_providers' '/BETA/protocols/saml2/auth') ASSERTION_FILE = 'signed_saml2_assertion.xml' # The values of the following variables match the attributes values found # in ASSERTION_FILE ISSUER = 'https://acme.com/FIM/sps/openstack/saml20' RECIPIENT = 'http://beta.com/Shibboleth.sso/SAML2/POST' SUBJECT = 'test_user' SUBJECT_DOMAIN = 'user_domain' ROLES = ['admin', 'member'] PROJECT = 'development' PROJECT_DOMAIN = 'project_domain' SAML_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2' ECP_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2/ecp' ASSERTION_VERSION = "2.0" SERVICE_PROVDIER_ID = 'ACME' def sp_ref(self): ref = { 'auth_url': self.SP_AUTH_URL, 'enabled': True, 'description': uuid.uuid4().hex, 'sp_url': self.RECIPIENT, 'relay_state_prefix': CONF.saml.relay_state_prefix, } return ref def setUp(self): super(SAMLGenerationTests, self).setUp() self.signed_assertion = saml2.create_class_from_xml_string( saml.Assertion, _load_xml(self.ASSERTION_FILE)) self.sp = self.sp_ref() url = '/OS-FEDERATION/service_providers/' + 
self.SERVICE_PROVDIER_ID self.put(url, body={'service_provider': self.sp}, expected_status=http_client.CREATED) def test_samlize_token_values(self): """Test the SAML generator produces a SAML object. Test the SAML generator directly by passing known arguments, the result should be a SAML object that consistently includes attributes based on the known arguments that were passed in. """ with mock.patch.object(keystone_idp, '_sign_assertion', return_value=self.signed_assertion): generator = keystone_idp.SAMLGenerator() response = generator.samlize_token(self.ISSUER, self.RECIPIENT, self.SUBJECT, self.SUBJECT_DOMAIN, self.ROLES, self.PROJECT, self.PROJECT_DOMAIN) assertion = response.assertion self.assertIsNotNone(assertion) self.assertIsInstance(assertion, saml.Assertion) issuer = response.issuer self.assertEqual(self.RECIPIENT, response.destination) self.assertEqual(self.ISSUER, issuer.text) user_attribute = assertion.attribute_statement[0].attribute[0] self.assertEqual(self.SUBJECT, user_attribute.attribute_value[0].text) user_domain_attribute = ( assertion.attribute_statement[0].attribute[1]) self.assertEqual(self.SUBJECT_DOMAIN, user_domain_attribute.attribute_value[0].text) role_attribute = assertion.attribute_statement[0].attribute[2] for attribute_value in role_attribute.attribute_value: self.assertIn(attribute_value.text, self.ROLES) project_attribute = assertion.attribute_statement[0].attribute[3] self.assertEqual(self.PROJECT, project_attribute.attribute_value[0].text) project_domain_attribute = ( assertion.attribute_statement[0].attribute[4]) self.assertEqual(self.PROJECT_DOMAIN, project_domain_attribute.attribute_value[0].text) def test_verify_assertion_object(self): """Test that the Assertion object is built properly. The Assertion doesn't need to be signed in this test, so _sign_assertion method is patched and doesn't alter the assertion. 
""" with mock.patch.object(keystone_idp, '_sign_assertion', side_effect=lambda x: x): generator = keystone_idp.SAMLGenerator() response = generator.samlize_token(self.ISSUER, self.RECIPIENT, self.SUBJECT, self.SUBJECT_DOMAIN, self.ROLES, self.PROJECT, self.PROJECT_DOMAIN) assertion = response.assertion self.assertEqual(self.ASSERTION_VERSION, assertion.version) def test_valid_saml_xml(self): """Test the generated SAML object can become valid XML. Test the generator directly by passing known arguments, the result should be a SAML object that consistently includes attributes based on the known arguments that were passed in. """ with mock.patch.object(keystone_idp, '_sign_assertion', return_value=self.signed_assertion): generator = keystone_idp.SAMLGenerator() response = generator.samlize_token(self.ISSUER, self.RECIPIENT, self.SUBJECT, self.SUBJECT_DOMAIN, self.ROLES, self.PROJECT, self.PROJECT_DOMAIN) saml_str = response.to_string() response = etree.fromstring(saml_str) issuer = response[0] assertion = response[2] self.assertEqual(self.RECIPIENT, response.get('Destination')) self.assertEqual(self.ISSUER, issuer.text) user_attribute = assertion[4][0] self.assertEqual(self.SUBJECT, user_attribute[0].text) user_domain_attribute = assertion[4][1] self.assertEqual(self.SUBJECT_DOMAIN, user_domain_attribute[0].text) role_attribute = assertion[4][2] for attribute_value in role_attribute: self.assertIn(attribute_value.text, self.ROLES) project_attribute = assertion[4][3] self.assertEqual(self.PROJECT, project_attribute[0].text) project_domain_attribute = assertion[4][4] self.assertEqual(self.PROJECT_DOMAIN, project_domain_attribute[0].text) def test_assertion_using_explicit_namespace_prefixes(self): def mocked_subprocess_check_output(*popenargs, **kwargs): # the last option is the assertion file to be signed filename = popenargs[0][-1] with open(filename, 'r') as f: assertion_content = f.read() # since we are not testing the signature itself, we can return # the assertion 
as is without signing it return assertion_content with mock.patch.object(subprocess, 'check_output', side_effect=mocked_subprocess_check_output): generator = keystone_idp.SAMLGenerator() response = generator.samlize_token(self.ISSUER, self.RECIPIENT, self.SUBJECT, self.SUBJECT_DOMAIN, self.ROLES, self.PROJECT, self.PROJECT_DOMAIN) assertion_xml = response.assertion.to_string() # make sure we have the proper tag and prefix for the assertion # namespace self.assertIn('>> @wip('waiting on bug #000000') >>> def test(): >>> pass """ def _wip(f): @six.wraps(f) def run_test(*args, **kwargs): try: f(*args, **kwargs) except Exception: raise testcase.TestSkipped('work in progress test failed: ' + message) raise AssertionError('work in progress test passed: ' + message) return run_test return _wip keystone-9.0.0/keystone/tests/unit/test_credential.py0000664000567000056710000002502012701407102024220 0ustar jenkinsjenkins00000000000000# Copyright 2015 UnitedStack, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from keystoneclient.contrib.ec2 import utils as ec2_utils from six.moves import http_client from keystone.common import utils from keystone.contrib.ec2 import controllers from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database from keystone.tests.unit import rest CRED_TYPE_EC2 = controllers.CRED_TYPE_EC2 class V2CredentialEc2TestCase(rest.RestfulTestCase): def setUp(self): super(V2CredentialEc2TestCase, self).setUp() self.user_id = self.user_foo['id'] self.project_id = self.tenant_bar['id'] def _get_token_id(self, r): return r.result['access']['token']['id'] def _get_ec2_cred(self): uri = self._get_ec2_cred_uri() r = self.public_request(method='POST', token=self.get_scoped_token(), path=uri, body={'tenant_id': self.project_id}) return r.result['credential'] def _get_ec2_cred_uri(self): return '/v2.0/users/%s/credentials/OS-EC2' % self.user_id def test_ec2_cannot_get_non_ec2_credential(self): access_key = uuid.uuid4().hex cred_id = utils.hash_access_key(access_key) non_ec2_cred = unit.new_credential_ref( user_id=self.user_id, project_id=self.project_id) non_ec2_cred['id'] = cred_id self.credential_api.create_credential(cred_id, non_ec2_cred) # if access_key is not found, ec2 controller raises Unauthorized # exception path = '/'.join([self._get_ec2_cred_uri(), access_key]) self.public_request(method='GET', token=self.get_scoped_token(), path=path, expected_status=http_client.UNAUTHORIZED) def assertValidErrorResponse(self, r): # FIXME(wwwjfy): it's copied from test_v3.py. The logic of this method # in test_v2.py and test_v3.py (both are inherited from rest.py) has no # difference, so they should be refactored into one place. Also, the # function signatures in both files don't match the one in the parent # class in rest.py. 
resp = r.result self.assertIsNotNone(resp.get('error')) self.assertIsNotNone(resp['error'].get('code')) self.assertIsNotNone(resp['error'].get('title')) self.assertIsNotNone(resp['error'].get('message')) self.assertEqual(int(resp['error']['code']), r.status_code) def test_ec2_list_credentials(self): self._get_ec2_cred() uri = self._get_ec2_cred_uri() r = self.public_request(method='GET', token=self.get_scoped_token(), path=uri) cred_list = r.result['credentials'] self.assertEqual(1, len(cred_list)) # non-EC2 credentials won't be fetched non_ec2_cred = unit.new_credential_ref( user_id=self.user_id, project_id=self.project_id) non_ec2_cred['type'] = uuid.uuid4().hex self.credential_api.create_credential(non_ec2_cred['id'], non_ec2_cred) r = self.public_request(method='GET', token=self.get_scoped_token(), path=uri) cred_list_2 = r.result['credentials'] # still one element because non-EC2 credentials are not returned. self.assertEqual(1, len(cred_list_2)) self.assertEqual(cred_list[0], cred_list_2[0]) class V2CredentialEc2Controller(unit.TestCase): def setUp(self): super(V2CredentialEc2Controller, self).setUp() self.useFixture(database.Database()) self.load_backends() self.load_fixtures(default_fixtures) self.user_id = self.user_foo['id'] self.project_id = self.tenant_bar['id'] self.controller = controllers.Ec2Controller() self.blob, tmp_ref = unit.new_ec2_credential( user_id=self.user_id, project_id=self.project_id) self.creds_ref = (controllers.Ec2Controller ._convert_v3_to_ec2_credential(tmp_ref)) def test_signature_validate_no_host_port(self): """Test signature validation with the access/secret provided.""" access = self.blob['access'] secret = self.blob['secret'] signer = ec2_utils.Ec2Signer(secret) params = {'SignatureMethod': 'HmacSHA256', 'SignatureVersion': '2', 'AWSAccessKeyId': access} request = {'host': 'foo', 'verb': 'GET', 'path': '/bar', 'params': params} signature = signer.generate(request) sig_ref = {'access': access, 'signature': signature, 'host': 
'foo', 'verb': 'GET', 'path': '/bar', 'params': params} # Now validate the signature based on the dummy request self.assertTrue(self.controller.check_signature(self.creds_ref, sig_ref)) def test_signature_validate_with_host_port(self): """Test signature validation when host is bound with port. Host is bound with a port, generally, the port here is not the standard port for the protocol, like '80' for HTTP and port 443 for HTTPS, the port is not omitted by the client library. """ access = self.blob['access'] secret = self.blob['secret'] signer = ec2_utils.Ec2Signer(secret) params = {'SignatureMethod': 'HmacSHA256', 'SignatureVersion': '2', 'AWSAccessKeyId': access} request = {'host': 'foo:8181', 'verb': 'GET', 'path': '/bar', 'params': params} signature = signer.generate(request) sig_ref = {'access': access, 'signature': signature, 'host': 'foo:8181', 'verb': 'GET', 'path': '/bar', 'params': params} # Now validate the signature based on the dummy request self.assertTrue(self.controller.check_signature(self.creds_ref, sig_ref)) def test_signature_validate_with_missed_host_port(self): """Test signature validation when host is bound with well-known port. Host is bound with a port, but the port is well-know port like '80' for HTTP and port 443 for HTTPS, sometimes, client library omit the port but then make the request with the port. see (How to create the string to sign): 'http://docs.aws.amazon.com/ general/latest/gr/signature-version-2.html'. Since "credentials['host']" is not set by client library but is taken from "req.host", so caused the differences. """ access = self.blob['access'] secret = self.blob['secret'] signer = ec2_utils.Ec2Signer(secret) params = {'SignatureMethod': 'HmacSHA256', 'SignatureVersion': '2', 'AWSAccessKeyId': access} # Omit the port to generate the signature. 
cnt_req = {'host': 'foo', 'verb': 'GET', 'path': '/bar', 'params': params} signature = signer.generate(cnt_req) sig_ref = {'access': access, 'signature': signature, 'host': 'foo:8080', 'verb': 'GET', 'path': '/bar', 'params': params} # Now validate the signature based on the dummy request # Check the signature again after omitting the port. self.assertTrue(self.controller.check_signature(self.creds_ref, sig_ref)) def test_signature_validate_no_signature(self): """Signature is not presented in signature reference data.""" access = self.blob['access'] params = {'SignatureMethod': 'HmacSHA256', 'SignatureVersion': '2', 'AWSAccessKeyId': access} sig_ref = {'access': access, 'signature': None, 'host': 'foo:8080', 'verb': 'GET', 'path': '/bar', 'params': params} # Now validate the signature based on the dummy request self.assertRaises(exception.Unauthorized, self.controller.check_signature, self.creds_ref, sig_ref) def test_signature_validate_invalid_signature(self): """Signature is not signed on the correct data.""" access = self.blob['access'] secret = self.blob['secret'] signer = ec2_utils.Ec2Signer(secret) params = {'SignatureMethod': 'HmacSHA256', 'SignatureVersion': '2', 'AWSAccessKeyId': access} request = {'host': 'bar', 'verb': 'GET', 'path': '/bar', 'params': params} signature = signer.generate(request) sig_ref = {'access': access, 'signature': signature, 'host': 'foo:8080', 'verb': 'GET', 'path': '/bar', 'params': params} # Now validate the signature based on the dummy request self.assertRaises(exception.Unauthorized, self.controller.check_signature, self.creds_ref, sig_ref) def test_check_non_admin_user(self): """Checking if user is admin causes uncaught error. When checking if a user is an admin, keystone.exception.Unauthorized is raised but not caught if the user is not an admin. 
""" # make a non-admin user context = {'is_admin': False, 'token_id': uuid.uuid4().hex} # check if user is admin # no exceptions should be raised self.controller._is_admin(context) keystone-9.0.0/keystone/tests/unit/__init__.py0000664000567000056710000000304612701407102022612 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n import six if six.PY3: # NOTE(dstanek): This block will monkey patch libraries that are not # yet supported in Python3. We do this that that it is possible to # execute any tests at all. Without monkey patching modules the # tests will fail with import errors. import sys from unittest import mock # noqa: our import detection is naive? sys.modules['ldap'] = mock.Mock() sys.modules['ldap.controls'] = mock.Mock() sys.modules['ldap.dn'] = mock.Mock() sys.modules['ldap.filter'] = mock.Mock() sys.modules['ldap.modlist'] = mock.Mock() sys.modules['ldappool'] = mock.Mock() # NOTE(dstanek): oslo_i18n.enable_lazy() must be called before # keystone.i18n._() is called to ensure it has the desired lazy lookup # behavior. This includes cases, like keystone.exceptions, where # keystone.i18n._() is called at import time. 
oslo_i18n.enable_lazy() from keystone.tests.unit.core import * # noqa keystone-9.0.0/keystone/tests/unit/external/0000775000567000056710000000000012701407246022331 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/external/test_timeutils.py0000664000567000056710000000232012701407102025745 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_utils import timeutils import keystone.tests.unit as tests class TestTimeUtils(tests.BaseTestCase): def test_parsing_date_strings_returns_a_datetime(self): example_date_str = '2015-09-23T04:45:37.196621Z' dt = timeutils.parse_strtime(example_date_str, fmt=tests.TIME_FORMAT) self.assertIsInstance(dt, datetime.datetime) def test_parsing_invalid_date_strings_raises_a_ValueError(self): example_date_str = '' simple_format = '%Y' self.assertRaises(ValueError, timeutils.parse_strtime, example_date_str, fmt=simple_format) keystone-9.0.0/keystone/tests/unit/external/__init__.py0000664000567000056710000000000012701407102024417 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/external/README.rst0000664000567000056710000000103012701407102024001 0ustar jenkinsjenkins00000000000000This directory contains interface tests for external libraries. The goal is not to test every possible path through a library's code and get 100% coverage. It's to give us a level of confidence that their general interface remains the same through version upgrades. 
This gives us a place to put these tests without having to litter our own tests with assertions that are not directly related to the code under test. The expectations for the external library are all in one place so it makes it easier for us to find out what they are. keystone-9.0.0/keystone/tests/unit/test_backend_ldap_pool.py0000664000567000056710000002367412701407102025543 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ldappool import mock from oslo_config import cfg from oslotest import mockpatch from keystone.common.ldap import core as ldap_core from keystone.identity.backends import ldap from keystone.tests import unit from keystone.tests.unit import fakeldap from keystone.tests.unit import test_backend_ldap CONF = cfg.CONF class LdapPoolCommonTestMixin(object): """LDAP pool specific common tests used here and in live tests.""" def cleanup_pools(self): ldap_core.PooledLDAPHandler.connection_pools.clear() def test_handler_with_use_pool_enabled(self): # by default use_pool and use_auth_pool is enabled in test pool config user_ref = self.identity_api.get_user(self.user_foo['id']) self.user_foo.pop('password') self.assertDictEqual(self.user_foo, user_ref) handler = ldap_core._get_connection(CONF.ldap.url, use_pool=True) self.assertIsInstance(handler, ldap_core.PooledLDAPHandler) @mock.patch.object(ldap_core.KeystoneLDAPHandler, 'connect') @mock.patch.object(ldap_core.KeystoneLDAPHandler, 'simple_bind_s') def test_handler_with_use_pool_not_enabled(self, bind_method, connect_method): self.config_fixture.config(group='ldap', use_pool=False) self.config_fixture.config(group='ldap', use_auth_pool=True) self.cleanup_pools() user_api = ldap.UserApi(CONF) handler = user_api.get_connection(user=None, password=None, end_user_auth=True) # use_auth_pool flag does not matter when use_pool is False # still handler is non pool version self.assertIsInstance(handler.conn, ldap_core.PythonLDAPHandler) @mock.patch.object(ldap_core.KeystoneLDAPHandler, 'connect') @mock.patch.object(ldap_core.KeystoneLDAPHandler, 'simple_bind_s') def test_handler_with_end_user_auth_use_pool_not_enabled(self, bind_method, connect_method): # by default use_pool is enabled in test pool config # now disabling use_auth_pool flag to test handler instance self.config_fixture.config(group='ldap', use_auth_pool=False) self.cleanup_pools() user_api = ldap.UserApi(CONF) handler = user_api.get_connection(user=None, 
password=None, end_user_auth=True) self.assertIsInstance(handler.conn, ldap_core.PythonLDAPHandler) # For end_user_auth case, flag should not be false otherwise # it will use, admin connections ldap pool handler = user_api.get_connection(user=None, password=None, end_user_auth=False) self.assertIsInstance(handler.conn, ldap_core.PooledLDAPHandler) def test_pool_size_set(self): # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.pool_size, ldappool_cm.size) def test_pool_retry_max_set(self): # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.pool_retry_max, ldappool_cm.retry_max) def test_pool_retry_delay_set(self): # just make one identity call to initiate ldap connection if not there self.identity_api.get_user(self.user_foo['id']) # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.pool_retry_delay, ldappool_cm.retry_delay) def test_pool_use_tls_set(self): # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.use_tls, ldappool_cm.use_tls) def test_pool_timeout_set(self): # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.pool_connection_timeout, ldappool_cm.timeout) def test_pool_use_pool_set(self): # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.use_pool, ldappool_cm.use_pool) def test_pool_connection_lifetime_set(self): # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.pool_connection_lifetime, ldappool_cm.max_lifetime) def test_max_connection_error_raised(self): who = CONF.ldap.user cred = CONF.ldap.password # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] ldappool_cm.size = 2 # 3rd 
connection attempt should raise Max connection error with ldappool_cm.connection(who, cred) as _: # conn1 with ldappool_cm.connection(who, cred) as _: # conn2 try: with ldappool_cm.connection(who, cred) as _: # conn3 _.unbind_s() self.fail() except Exception as ex: self.assertIsInstance(ex, ldappool.MaxConnectionReachedError) ldappool_cm.size = CONF.ldap.pool_size def test_pool_size_expands_correctly(self): who = CONF.ldap.user cred = CONF.ldap.password # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] ldappool_cm.size = 3 def _get_conn(): return ldappool_cm.connection(who, cred) # Open 3 connections first with _get_conn() as _: # conn1 self.assertEqual(1, len(ldappool_cm)) with _get_conn() as _: # conn2 self.assertEqual(2, len(ldappool_cm)) with _get_conn() as _: # conn2 _.unbind_ext_s() self.assertEqual(3, len(ldappool_cm)) # Then open 3 connections again and make sure size does not grow # over 3 with _get_conn() as _: # conn1 self.assertEqual(1, len(ldappool_cm)) with _get_conn() as _: # conn2 self.assertEqual(2, len(ldappool_cm)) with _get_conn() as _: # conn3 _.unbind_ext_s() self.assertEqual(3, len(ldappool_cm)) def test_password_change_with_pool(self): old_password = self.user_sna['password'] self.cleanup_pools() # authenticate so that connection is added to pool before password # change user_ref = self.identity_api.authenticate( context={}, user_id=self.user_sna['id'], password=self.user_sna['password']) self.user_sna.pop('password') self.user_sna['enabled'] = True self.assertDictEqual(self.user_sna, user_ref) new_password = 'new_password' user_ref['password'] = new_password self.identity_api.update_user(user_ref['id'], user_ref) # now authenticate again to make sure new password works with # connection pool user_ref2 = self.identity_api.authenticate( context={}, user_id=self.user_sna['id'], password=new_password) user_ref.pop('password') self.assertDictEqual(user_ref, user_ref2) # Authentication with old password would 
not work here as there # is only one connection in pool which get bind again with updated # password..so no old bind is maintained in this case. self.assertRaises(AssertionError, self.identity_api.authenticate, context={}, user_id=self.user_sna['id'], password=old_password) class LDAPIdentity(LdapPoolCommonTestMixin, test_backend_ldap.LDAPIdentity, unit.TestCase): """Executes tests in existing base class with pooled LDAP handler.""" def setUp(self): self.useFixture(mockpatch.PatchObject( ldap_core.PooledLDAPHandler, 'Connector', fakeldap.FakeLdapPool)) super(LDAPIdentity, self).setUp() self.addCleanup(self.cleanup_pools) # storing to local variable to avoid long references self.conn_pools = ldap_core.PooledLDAPHandler.connection_pools # super class loads db fixtures which establishes ldap connection # so adding dummy call to highlight connection pool initialization # as its not that obvious though its not needed here self.identity_api.get_user(self.user_foo['id']) def config_files(self): config_files = super(LDAPIdentity, self).config_files() config_files.append(unit.dirs.tests_conf('backend_ldap_pool.conf')) return config_files @mock.patch.object(ldap_core, 'utf8_encode') def test_utf8_encoded_is_used_in_pool(self, mocked_method): def side_effect(arg): return arg mocked_method.side_effect = side_effect # invalidate the cache to get utf8_encode function called. self.identity_api.get_user.invalidate(self.identity_api, self.user_foo['id']) self.identity_api.get_user(self.user_foo['id']) mocked_method.assert_any_call(CONF.ldap.user) mocked_method.assert_any_call(CONF.ldap.password) keystone-9.0.0/keystone/tests/unit/fakeldap.py0000664000567000056710000005732312701407102022631 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fake LDAP server for test harness. This class does very little error checking, and knows nothing about ldap class definitions. It implements the minimum emulation of the python ldap library to work with keystone. """ import random import re import shelve import ldap from oslo_config import cfg from oslo_log import log import six from six import moves from keystone.common.ldap import core from keystone import exception SCOPE_NAMES = { ldap.SCOPE_BASE: 'SCOPE_BASE', ldap.SCOPE_ONELEVEL: 'SCOPE_ONELEVEL', ldap.SCOPE_SUBTREE: 'SCOPE_SUBTREE', } # http://msdn.microsoft.com/en-us/library/windows/desktop/aa366991(v=vs.85).aspx # noqa CONTROL_TREEDELETE = '1.2.840.113556.1.4.805' LOG = log.getLogger(__name__) CONF = cfg.CONF def _internal_attr(attr_name, value_or_values): def normalize_value(value): return core.utf8_decode(value) def normalize_dn(dn): # Capitalize the attribute names as an LDAP server might. # NOTE(blk-u): Special case for this tested value, used with # test_user_id_comma. The call to str2dn here isn't always correct # here, because `dn` is escaped for an LDAP filter. str2dn() normally # works only because there's no special characters in `dn`. if dn == 'cn=Doe\\5c, John,ou=Users,cn=example,cn=com': return 'CN=Doe\\, John,OU=Users,CN=example,CN=com' # NOTE(blk-u): Another special case for this tested value. When a # roleOccupant has an escaped comma, it gets converted to \2C. 
if dn == 'cn=Doe\\, John,ou=Users,cn=example,cn=com': return 'CN=Doe\\2C John,OU=Users,CN=example,CN=com' try: dn = ldap.dn.str2dn(core.utf8_encode(dn)) except ldap.DECODING_ERROR: # NOTE(amakarov): In case of IDs instead of DNs in group members # they must be handled as regular values. return normalize_value(dn) norm = [] for part in dn: name, val, i = part[0] name = core.utf8_decode(name) name = name.upper() name = core.utf8_encode(name) norm.append([(name, val, i)]) return core.utf8_decode(ldap.dn.dn2str(norm)) if attr_name in ('member', 'roleOccupant'): attr_fn = normalize_dn else: attr_fn = normalize_value if isinstance(value_or_values, list): return [attr_fn(x) for x in value_or_values] return [attr_fn(value_or_values)] def _match_query(query, attrs, attrs_checked): """Match an ldap query to an attribute dictionary. The characters &, |, and ! are supported in the query. No syntax checking is performed, so malformed queries will not work correctly. """ # cut off the parentheses inner = query[1:-1] if inner.startswith(('&', '|')): if inner[0] == '&': matchfn = all else: matchfn = any # cut off the & or | groups = _paren_groups(inner[1:]) return matchfn(_match_query(group, attrs, attrs_checked) for group in groups) if inner.startswith('!'): # cut off the ! 
and the nested parentheses return not _match_query(query[2:-1], attrs, attrs_checked) (k, _sep, v) = inner.partition('=') attrs_checked.add(k.lower()) return _match(k, v, attrs) def _paren_groups(source): """Split a string into parenthesized groups.""" count = 0 start = 0 result = [] for pos in moves.range(len(source)): if source[pos] == '(': if count == 0: start = pos count += 1 if source[pos] == ')': count -= 1 if count == 0: result.append(source[start:pos + 1]) return result def _match(key, value, attrs): """Match a given key and value against an attribute list.""" def match_with_wildcards(norm_val, val_list): # Case insensitive checking with wildcards if norm_val.startswith('*'): if norm_val.endswith('*'): # Is the string anywhere in the target? for x in val_list: if norm_val[1:-1] in x: return True else: # Is the string at the end of the target? for x in val_list: if (norm_val[1:] == x[len(x) - len(norm_val) + 1:]): return True elif norm_val.endswith('*'): # Is the string at the start of the target? for x in val_list: if norm_val[:-1] == x[:len(norm_val) - 1]: return True else: # Is the string an exact match? for x in val_list: if check_value == x: return True return False if key not in attrs: return False # This is a pure wild card search, so the answer must be yes! if value == '*': return True if key == 'serviceId': # for serviceId, the backend is returning a list of numbers # make sure we convert them to strings first before comparing # them str_sids = [six.text_type(x) for x in attrs[key]] return six.text_type(value) in str_sids if key != 'objectclass': check_value = _internal_attr(key, value)[0].lower() norm_values = list( _internal_attr(key, x)[0].lower() for x in attrs[key]) return match_with_wildcards(check_value, norm_values) # it is an objectclass check, so check subclasses values = _subs(value) for v in values: if v in attrs[key]: return True return False def _subs(value): """Returns a list of subclass strings. 
The strings represent the ldap objectclass plus any subclasses that inherit from it. Fakeldap doesn't know about the ldap object structure, so subclasses need to be defined manually in the dictionary below. """ subs = {'groupOfNames': ['keystoneTenant', 'keystoneRole', 'keystoneTenantRole']} if value in subs: return [value] + subs[value] return [value] server_fail = False class FakeShelve(dict): def sync(self): pass FakeShelves = {} PendingRequests = {} class FakeLdap(core.LDAPHandler): """Emulate the python-ldap API. The python-ldap API requires all strings to be UTF-8 encoded. This is assured by the caller of this interface (i.e. KeystoneLDAPHandler). However, internally this emulation MUST process and store strings in a canonical form which permits operations on characters. Encoded strings do not provide the ability to operate on characters. Therefore this emulation accepts UTF-8 encoded strings, decodes them to unicode for operations internal to this emulation, and encodes them back to UTF-8 when returning values from the emulation. 
""" __prefix = 'ldap:' def __init__(self, conn=None): super(FakeLdap, self).__init__(conn=conn) self._ldap_options = {ldap.OPT_DEREF: ldap.DEREF_NEVER} def connect(self, url, page_size=0, alias_dereferencing=None, use_tls=False, tls_cacertfile=None, tls_cacertdir=None, tls_req_cert='demand', chase_referrals=None, debug_level=None, use_pool=None, pool_size=None, pool_retry_max=None, pool_retry_delay=None, pool_conn_timeout=None, pool_conn_lifetime=None): if url.startswith('fake://memory'): if url not in FakeShelves: FakeShelves[url] = FakeShelve() self.db = FakeShelves[url] else: self.db = shelve.open(url[7:]) using_ldaps = url.lower().startswith("ldaps") if use_tls and using_ldaps: raise AssertionError('Invalid TLS / LDAPS combination') if use_tls: if tls_cacertfile: ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile) elif tls_cacertdir: ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir) if tls_req_cert in list(core.LDAP_TLS_CERTS.values()): ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert) else: raise ValueError("invalid TLS_REQUIRE_CERT tls_req_cert=%s", tls_req_cert) if alias_dereferencing is not None: self.set_option(ldap.OPT_DEREF, alias_dereferencing) self.page_size = page_size self.use_pool = use_pool self.pool_size = pool_size self.pool_retry_max = pool_retry_max self.pool_retry_delay = pool_retry_delay self.pool_conn_timeout = pool_conn_timeout self.pool_conn_lifetime = pool_conn_lifetime def dn(self, dn): return core.utf8_decode(dn) def _dn_to_id_attr(self, dn): return core.utf8_decode(ldap.dn.str2dn(core.utf8_encode(dn))[0][0][0]) def _dn_to_id_value(self, dn): return core.utf8_decode(ldap.dn.str2dn(core.utf8_encode(dn))[0][0][1]) def key(self, dn): return '%s%s' % (self.__prefix, self.dn(dn)) def simple_bind_s(self, who='', cred='', serverctrls=None, clientctrls=None): """This method is ignored, but provided for compatibility.""" if server_fail: raise ldap.SERVER_DOWN whos = ['cn=Admin', CONF.ldap.user] if who in whos and cred in 
['password', CONF.ldap.password]: return try: attrs = self.db[self.key(who)] except KeyError: LOG.debug('bind fail: who=%s not found', core.utf8_decode(who)) raise ldap.NO_SUCH_OBJECT db_password = None try: db_password = attrs['userPassword'][0] except (KeyError, IndexError): LOG.debug('bind fail: password for who=%s not found', core.utf8_decode(who)) raise ldap.INAPPROPRIATE_AUTH if cred != db_password: LOG.debug('bind fail: password for who=%s does not match', core.utf8_decode(who)) raise ldap.INVALID_CREDENTIALS def unbind_s(self): """This method is ignored, but provided for compatibility.""" if server_fail: raise ldap.SERVER_DOWN def add_s(self, dn, modlist): """Add an object with the specified attributes at dn.""" if server_fail: raise ldap.SERVER_DOWN id_attr_in_modlist = False id_attr = self._dn_to_id_attr(dn) id_value = self._dn_to_id_value(dn) # The LDAP API raises a TypeError if attr name is None. for k, dummy_v in modlist: if k is None: raise TypeError('must be string, not None. 
modlist=%s' % modlist) if k == id_attr: for val in dummy_v: if core.utf8_decode(val) == id_value: id_attr_in_modlist = True if not id_attr_in_modlist: LOG.debug('id_attribute=%(attr)s missing, attributes=%(attrs)s' % {'attr': id_attr, 'attrs': modlist}) raise ldap.NAMING_VIOLATION key = self.key(dn) LOG.debug('add item: dn=%(dn)s, attrs=%(attrs)s', { 'dn': core.utf8_decode(dn), 'attrs': modlist}) if key in self.db: LOG.debug('add item failed: dn=%s is already in store.', core.utf8_decode(dn)) raise ldap.ALREADY_EXISTS(dn) self.db[key] = {k: _internal_attr(k, v) for k, v in modlist} self.db.sync() def delete_s(self, dn): """Remove the ldap object at specified dn.""" return self.delete_ext_s(dn, serverctrls=[]) def _getChildren(self, dn): return [k for k, v in self.db.items() if re.match('%s.*,%s' % ( re.escape(self.__prefix), re.escape(self.dn(dn))), k)] def delete_ext_s(self, dn, serverctrls, clientctrls=None): """Remove the ldap object at specified dn.""" if server_fail: raise ldap.SERVER_DOWN try: if CONTROL_TREEDELETE in [c.controlType for c in serverctrls]: LOG.debug('FakeLdap subtree_delete item: dn=%s', core.utf8_decode(dn)) children = self._getChildren(dn) for c in children: del self.db[c] key = self.key(dn) LOG.debug('FakeLdap delete item: dn=%s', core.utf8_decode(dn)) del self.db[key] except KeyError: LOG.debug('delete item failed: dn=%s not found.', core.utf8_decode(dn)) raise ldap.NO_SUCH_OBJECT self.db.sync() def modify_s(self, dn, modlist): """Modify the object at dn using the attribute list. 
:param dn: an LDAP DN :param modlist: a list of tuples in the following form: ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value) """ if server_fail: raise ldap.SERVER_DOWN key = self.key(dn) LOG.debug('modify item: dn=%(dn)s attrs=%(attrs)s', { 'dn': core.utf8_decode(dn), 'attrs': modlist}) try: entry = self.db[key] except KeyError: LOG.debug('modify item failed: dn=%s not found.', core.utf8_decode(dn)) raise ldap.NO_SUCH_OBJECT for cmd, k, v in modlist: values = entry.setdefault(k, []) if cmd == ldap.MOD_ADD: v = _internal_attr(k, v) for x in v: if x in values: raise ldap.TYPE_OR_VALUE_EXISTS values += v elif cmd == ldap.MOD_REPLACE: values[:] = _internal_attr(k, v) elif cmd == ldap.MOD_DELETE: if v is None: if not values: LOG.debug('modify item failed: ' 'item has no attribute "%s" to delete', k) raise ldap.NO_SUCH_ATTRIBUTE values[:] = [] else: for val in _internal_attr(k, v): try: values.remove(val) except ValueError: LOG.debug('modify item failed: ' 'item has no attribute "%(k)s" with ' 'value "%(v)s" to delete', { 'k': k, 'v': val}) raise ldap.NO_SUCH_ATTRIBUTE else: LOG.debug('modify item failed: unknown command %s', cmd) raise NotImplementedError('modify_s action %s not' ' implemented' % cmd) self.db[key] = entry self.db.sync() def search_s(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0): """Search for all matching objects under base using the query. Args: base -- dn to search under scope -- search scope (base, subtree, onelevel) filterstr -- filter objects by attrlist -- attrs to return. 
Returns all attrs if not specified """ if server_fail: raise ldap.SERVER_DOWN if (not filterstr) and (scope != ldap.SCOPE_BASE): raise AssertionError('Search without filter on onelevel or ' 'subtree scope') if scope == ldap.SCOPE_BASE: try: item_dict = self.db[self.key(base)] except KeyError: LOG.debug('search fail: dn not found for SCOPE_BASE') raise ldap.NO_SUCH_OBJECT results = [(base, item_dict)] elif scope == ldap.SCOPE_SUBTREE: # FIXME - LDAP search with SUBTREE scope must return the base # entry, but the code below does _not_. Unfortunately, there are # several tests that depend on this broken behavior, and fail # when the base entry is returned in the search results. The # fix is easy here, just initialize results as above for # the SCOPE_BASE case. # https://bugs.launchpad.net/keystone/+bug/1368772 try: item_dict = self.db[self.key(base)] except KeyError: LOG.debug('search fail: dn not found for SCOPE_SUBTREE') raise ldap.NO_SUCH_OBJECT results = [(base, item_dict)] extraresults = [(k[len(self.__prefix):], v) for k, v in self.db.items() if re.match('%s.*,%s' % (re.escape(self.__prefix), re.escape(self.dn(base))), k)] results.extend(extraresults) elif scope == ldap.SCOPE_ONELEVEL: def get_entries(): base_dn = ldap.dn.str2dn(core.utf8_encode(base)) base_len = len(base_dn) for k, v in self.db.items(): if not k.startswith(self.__prefix): continue k_dn_str = k[len(self.__prefix):] k_dn = ldap.dn.str2dn(core.utf8_encode(k_dn_str)) if len(k_dn) != base_len + 1: continue if k_dn[-base_len:] != base_dn: continue yield (k_dn_str, v) results = list(get_entries()) else: # openldap client/server raises PROTOCOL_ERROR for unexpected scope raise ldap.PROTOCOL_ERROR objects = [] for dn, attrs in results: # filter the objects by filterstr id_attr, id_val, _ = ldap.dn.str2dn(core.utf8_encode(dn))[0][0] id_attr = core.utf8_decode(id_attr) id_val = core.utf8_decode(id_val) match_attrs = attrs.copy() match_attrs[id_attr] = [id_val] attrs_checked = set() if not filterstr or 
_match_query(filterstr, match_attrs, attrs_checked): if (filterstr and (scope != ldap.SCOPE_BASE) and ('objectclass' not in attrs_checked)): raise AssertionError('No objectClass in search filter') # filter the attributes by attrlist attrs = {k: v for k, v in attrs.items() if not attrlist or k in attrlist} objects.append((dn, attrs)) return objects def set_option(self, option, invalue): self._ldap_options[option] = invalue def get_option(self, option): value = self._ldap_options.get(option) return value def search_ext(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, serverctrls=None, clientctrls=None, timeout=-1, sizelimit=0): if clientctrls is not None or timeout != -1 or sizelimit != 0: raise exception.NotImplemented() # only passing a single server control is supported by this fake ldap if len(serverctrls) > 1: raise exception.NotImplemented() # search_ext is async and returns an identifier used for # retrieving the results via result3(). This will be emulated by # storing the request in a variable with random integer key and # performing the real lookup in result3() msgid = random.randint(0, 1000) PendingRequests[msgid] = (base, scope, filterstr, attrlist, attrsonly, serverctrls) return msgid def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None, resp_ctrl_classes=None): """Execute async request Only msgid param is supported. Request info is fetched from global variable `PendingRequests` by msgid, executed using search_s and limited if requested. """ if all != 1 or timeout is not None or resp_ctrl_classes is not None: raise exception.NotImplemented() params = PendingRequests[msgid] # search_s accepts a subset of parameters of search_ext, # that's why we use only the first 5. results = self.search_s(*params[:5]) # extract limit from serverctrl serverctrls = params[5] ctrl = serverctrls[0] if ctrl.size: rdata = results[:ctrl.size] else: rdata = results # real result3 returns various service info -- rtype, rmsgid, # serverctrls. 
Now this info is not used, so all this info is None rtype = None rmsgid = None serverctrls = None return (rtype, rdata, rmsgid, serverctrls) class FakeLdapPool(FakeLdap): """Emulate the python-ldap API with pooled connections. This class is used as connector class in PooledLDAPHandler. """ def __init__(self, uri, retry_max=None, retry_delay=None, conn=None): super(FakeLdapPool, self).__init__(conn=conn) self.url = uri self.connected = None self.conn = self self._connection_time = 5 # any number greater than 0 def get_lifetime(self): return self._connection_time def simple_bind_s(self, who=None, cred=None, serverctrls=None, clientctrls=None): if self.url.startswith('fakepool://memory'): if self.url not in FakeShelves: FakeShelves[self.url] = FakeShelve() self.db = FakeShelves[self.url] else: self.db = shelve.open(self.url[11:]) if not who: who = 'cn=Admin' if not cred: cred = 'password' super(FakeLdapPool, self).simple_bind_s(who=who, cred=cred, serverctrls=serverctrls, clientctrls=clientctrls) def unbind_ext_s(self): """Added to extend FakeLdap as connector class.""" pass class FakeLdapNoSubtreeDelete(FakeLdap): """FakeLdap subclass that does not support subtree delete Same as FakeLdap except delete will throw the LDAP error ldap.NOT_ALLOWED_ON_NONLEAF if there is an attempt to delete an entry that has children. 
""" def delete_ext_s(self, dn, serverctrls, clientctrls=None): """Remove the ldap object at specified dn.""" if server_fail: raise ldap.SERVER_DOWN try: children = self._getChildren(dn) if children: raise ldap.NOT_ALLOWED_ON_NONLEAF except KeyError: LOG.debug('delete item failed: dn=%s not found.', core.utf8_decode(dn)) raise ldap.NO_SUCH_OBJECT super(FakeLdapNoSubtreeDelete, self).delete_ext_s(dn, serverctrls, clientctrls) keystone-9.0.0/keystone/tests/unit/saml2/0000775000567000056710000000000012701407246021525 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/saml2/idp_saml2_metadata.xml0000664000567000056710000000456612701407102025763 0ustar jenkinsjenkins00000000000000 MIIDpTCCAo0CAREwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0xMzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgY8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBvcGVuc3RhY2sub3JnMREwDwYDVQQDEwhLZXlzdG9uZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMTC6IdNd9Cg1DshcrT5gRVRF36nEmjSA9QWdik7B925PK70U4F6j4pz/5JL7plIo/8rJ4jJz9ccE7m0iA+IuABtEhEwXkG9rj47Oy0J4ZyDGSh2K1Bl78PA9zxXSzysUTSjBKdAh29dPYbJY7cgZJ0uC3AtfVceYiAOIi14SdFeZ0LZLDXBuLaqUmSMrmKwJ9wAMOCb/jbBP9/3Ycd0GYjlvrSBU4Bqb8/NHasyO4DpPN68OAoyD5r5jUtV8QZN03UjIsoux8e0lrL6+MVtJo0OfWvlSrlzS5HKSryY+uqqQEuxtZKpJM2MV85ujvjc8eDSChh2shhDjBem3FIlHKUCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAed9fHgdJrk+gZcO5gsqq6uURfDOuYD66GsSdZw4BqHjYAcnyWq2da+iw7Uxkqu7iLf2k4+Hu3xjDFrce479OwZkSnbXmqB7XspTGOuM8MgT7jB/ypKTOZ6qaZKSWK1Hta995hMrVVlhUNBLh0MPGqoVWYA4d7mblujgH9vp+4mpCciJagHks8K5FBmI+pobB+uFdSYDoRzX9LTpStspK4e3IoY8baILuGcdKimRNBv6ItG4hMrntAe1/nWMJyUu5rDTGf2V/vAaS0S/faJBwQSz1o38QHMTWHNspfwIdX3yMqI9u7/vYlz3rLy5WdBdUgZrZ3/VLmJTiJVZu5Owq4Q== openstack openstack openstack openstack 
first lastname admin@example.com 555-555-5555 keystone-9.0.0/keystone/tests/unit/saml2/signed_saml2_assertion.xml0000664000567000056710000001046012701407102026675 0ustar jenkinsjenkins00000000000000 https://acme.com/FIM/sps/openstack/saml20 Lem2TKyYt+/tJy2iSos1t0KxcJE= b//GXtGeCIJPFsMAHrx4+3yjrL4smSpRLXG9PB3TLMJvU4fx8n2PzK7+VbtWNbZG vSgbvbQR52jq77iyaRfQ2iELuFEY+YietLRi7hsitkJCEayPmU+BDlNIGuCXZjAy 7tmtGFkLlZZJaom1jAzHfZ5JPjZdM5hvQwrhCI2Kzyk= MIICtjCCAh+gAwIBAgIJAJTeBUN2i9ZNMA0GCSqGSIb3DQEBBQUAME4xCzAJBgNV BAYTAkhSMQ8wDQYDVQQIEwZaYWdyZWIxITAfBgNVBAoTGE5la2Egb3JnYW5pemFj aWphIGQuby5vLjELMAkGA1UEAxMCQ0EwHhcNMTIxMjI4MTYwODA1WhcNMTQxMjI4 MTYwODA1WjBvMQswCQYDVQQGEwJIUjEPMA0GA1UECBMGWmFncmViMQ8wDQYDVQQH EwZaYWdyZWIxITAfBgNVBAoTGE5la2Egb3JnYW5pemFjaWphIGQuby5vLjEbMBkG A1UEAxMSUHJvZ3JhbWVyc2thIGZpcm1hMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB iQKBgQCgWApHV5cma0GY/v/vmwgciDQBgITcitx2rG0F+ghXtGiEJeK75VY7jQwE UFCbgV+AaOY2NQChK2FKec7Hss/5y+jbWfX2yVwX6TYcCwnOGXenz+cgx2Fwqpu3 ncL6dYJMfdbKvojBaJQLJTaNjRJsZACButDsDtXDSH9QaRy+hQIDAQABo3sweTAJ BgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0 aWZpY2F0ZTAdBgNVHQ4EFgQUSo9ThP/MOg8QIRWxoPo8qKR8O2wwHwYDVR0jBBgw FoAUAelckr4bx8MwZ7y+VlHE46Mbo+cwDQYJKoZIhvcNAQEFBQADgYEAy19Z7Z5/ /MlWkogu41s0RxL9ffG60QQ0Y8hhDTmgHNx1itj0wT8pB7M4KVMbZ4hjjSFsfRq4 Vj7jm6LwU0WtZ3HGl8TygTh8AAJvbLROnTjLL5MqI9d9pKvIIfZ2Qs3xmJ7JEv4H UHeBXxQq/GmfBv3l+V5ObQ+EHKnyDodLHCk= test_user urn:oasis:names:tc:SAML:2.0:ac:classes:Password https://acme.com/FIM/sps/openstack/saml20 test_user user_domain admin member development project_domain keystone-9.0.0/keystone/tests/unit/test_contrib_simple_cert.py0000664000567000056710000000363512701407102026144 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from six.moves import http_client from keystone.tests.unit import test_v3 class BaseTestCase(test_v3.RestfulTestCase): CA_PATH = '/v3/OS-SIMPLE-CERT/ca' CERT_PATH = '/v3/OS-SIMPLE-CERT/certificates' class TestSimpleCert(BaseTestCase): def request_cert(self, path): content_type = 'application/x-pem-file' response = self.request(app=self.public_app, method='GET', path=path, headers={'Accept': content_type}, expected_status=http_client.OK) self.assertEqual(content_type, response.content_type.lower()) self.assertIn(b'---BEGIN', response.body) return response def test_ca_cert(self): self.request_cert(self.CA_PATH) def test_signing_cert(self): self.request_cert(self.CERT_PATH) def test_missing_file(self): # these files do not exist self.config_fixture.config(group='signing', ca_certs=uuid.uuid4().hex, certfile=uuid.uuid4().hex) for path in [self.CA_PATH, self.CERT_PATH]: self.request(app=self.public_app, method='GET', path=path, expected_status=http_client.INTERNAL_SERVER_ERROR) keystone-9.0.0/keystone/tests/unit/test_ldap_livetest.py0000664000567000056710000002053012701407102024746 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import subprocess import uuid import ldap.modlist from oslo_config import cfg from six.moves import range from keystone import exception from keystone.identity.backends import ldap as identity_ldap from keystone.tests import unit from keystone.tests.unit import test_backend_ldap CONF = cfg.CONF def create_object(dn, attrs): conn = ldap.initialize(CONF.ldap.url) conn.simple_bind_s(CONF.ldap.user, CONF.ldap.password) ldif = ldap.modlist.addModlist(attrs) conn.add_s(dn, ldif) conn.unbind_s() class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity): def setUp(self): self._ldap_skip_live() super(LiveLDAPIdentity, self).setUp() def _ldap_skip_live(self): self.skip_if_env_not_set('ENABLE_LDAP_LIVE_TEST') def clear_database(self): devnull = open('/dev/null', 'w') subprocess.call(['ldapdelete', '-x', '-D', CONF.ldap.user, '-H', CONF.ldap.url, '-w', CONF.ldap.password, '-r', CONF.ldap.suffix], stderr=devnull) if CONF.ldap.suffix.startswith('ou='): tree_dn_attrs = {'objectclass': 'organizationalUnit', 'ou': 'openstack'} else: tree_dn_attrs = {'objectclass': ['dcObject', 'organizationalUnit'], 'dc': 'openstack', 'ou': 'openstack'} create_object(CONF.ldap.suffix, tree_dn_attrs) create_object(CONF.ldap.user_tree_dn, {'objectclass': 'organizationalUnit', 'ou': 'Users'}) create_object(CONF.ldap.role_tree_dn, {'objectclass': 'organizationalUnit', 'ou': 'Roles'}) create_object(CONF.ldap.group_tree_dn, {'objectclass': 'organizationalUnit', 'ou': 'UserGroups'}) def config_files(self): config_files = super(LiveLDAPIdentity, self).config_files() 
config_files.append(unit.dirs.tests_conf('backend_liveldap.conf')) return config_files def test_build_tree(self): """Regression test for building the tree names.""" # logic is different from the fake backend. user_api = identity_ldap.UserApi(CONF) self.assertTrue(user_api) self.assertEqual(user_api.tree_dn, CONF.ldap.user_tree_dn) def test_ldap_dereferencing(self): alt_users_ldif = {'objectclass': ['top', 'organizationalUnit'], 'ou': 'alt_users'} alt_fake_user_ldif = {'objectclass': ['person', 'inetOrgPerson'], 'cn': 'alt_fake1', 'sn': 'alt_fake1'} aliased_users_ldif = {'objectclass': ['alias', 'extensibleObject'], 'aliasedobjectname': "ou=alt_users,%s" % CONF.ldap.suffix} create_object("ou=alt_users,%s" % CONF.ldap.suffix, alt_users_ldif) create_object("%s=alt_fake1,ou=alt_users,%s" % (CONF.ldap.user_id_attribute, CONF.ldap.suffix), alt_fake_user_ldif) create_object("ou=alt_users,%s" % CONF.ldap.user_tree_dn, aliased_users_ldif) self.config_fixture.config(group='ldap', query_scope='sub', alias_dereferencing='never') self.identity_api = identity_ldap.Identity() self.assertRaises(exception.UserNotFound, self.identity_api.get_user, 'alt_fake1') self.config_fixture.config(group='ldap', alias_dereferencing='searching') self.identity_api = identity_ldap.Identity() user_ref = self.identity_api.get_user('alt_fake1') self.assertEqual('alt_fake1', user_ref['id']) self.config_fixture.config(group='ldap', alias_dereferencing='always') self.identity_api = identity_ldap.Identity() user_ref = self.identity_api.get_user('alt_fake1') self.assertEqual('alt_fake1', user_ref['id']) # FakeLDAP does not correctly process filters, so this test can only be # run against a live LDAP server def test_list_groups_for_user_filtered(self): domain = self._get_domain_fixture() test_groups = [] test_users = [] GROUP_COUNT = 3 USER_COUNT = 2 for x in range(0, USER_COUNT): # TODO(shaleh): use unit.new_user_ref() new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex, 'enabled': True, 
'domain_id': domain['id']} new_user = self.identity_api.create_user(new_user) test_users.append(new_user) positive_user = test_users[0] negative_user = test_users[1] for x in range(0, USER_COUNT): group_refs = self.identity_api.list_groups_for_user( test_users[x]['id']) self.assertEqual(0, len(group_refs)) for x in range(0, GROUP_COUNT): new_group = unit.new_group_ref(domain_id=domain['id']) new_group = self.identity_api.create_group(new_group) test_groups.append(new_group) group_refs = self.identity_api.list_groups_for_user( positive_user['id']) self.assertEqual(x, len(group_refs)) self.identity_api.add_user_to_group( positive_user['id'], new_group['id']) group_refs = self.identity_api.list_groups_for_user( positive_user['id']) self.assertEqual(x + 1, len(group_refs)) group_refs = self.identity_api.list_groups_for_user( negative_user['id']) self.assertEqual(0, len(group_refs)) driver = self.identity_api._select_identity_driver( CONF.identity.default_domain_id) driver.group.ldap_filter = '(dn=xx)' group_refs = self.identity_api.list_groups_for_user( positive_user['id']) self.assertEqual(0, len(group_refs)) group_refs = self.identity_api.list_groups_for_user( negative_user['id']) self.assertEqual(0, len(group_refs)) driver.group.ldap_filter = '(objectclass=*)' group_refs = self.identity_api.list_groups_for_user( positive_user['id']) self.assertEqual(GROUP_COUNT, len(group_refs)) group_refs = self.identity_api.list_groups_for_user( negative_user['id']) self.assertEqual(0, len(group_refs)) def test_user_enable_attribute_mask(self): self.config_fixture.config( group='ldap', user_enabled_emulation=False, user_enabled_attribute='employeeType') super(LiveLDAPIdentity, self).test_user_enable_attribute_mask() def test_create_project_case_sensitivity(self): # The attribute used for the live LDAP tests is case insensitive. def call_super(): (super(LiveLDAPIdentity, self). 
test_create_project_case_sensitivity()) self.assertRaises(exception.Conflict, call_super) def test_create_user_case_sensitivity(self): # The attribute used for the live LDAP tests is case insensitive. def call_super(): super(LiveLDAPIdentity, self).test_create_user_case_sensitivity() self.assertRaises(exception.Conflict, call_super) def test_project_update_missing_attrs_with_a_falsey_value(self): # The description attribute doesn't allow an empty value. def call_super(): (super(LiveLDAPIdentity, self). test_project_update_missing_attrs_with_a_falsey_value()) self.assertRaises(ldap.INVALID_SYNTAX, call_super) keystone-9.0.0/keystone/tests/unit/test_url_middleware.py0000664000567000056710000000375112701407102025114 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import webob from keystone import middleware from keystone.tests import unit class FakeApp(object): """Fakes a WSGI app URL normalized.""" def __call__(self, env, start_response): resp = webob.Response() resp.body = 'SUCCESS' return resp(env, start_response) class UrlMiddlewareTest(unit.TestCase): def setUp(self): self.middleware = middleware.NormalizingFilter(FakeApp()) self.response_status = None self.response_headers = None super(UrlMiddlewareTest, self).setUp() def start_fake_response(self, status, headers): self.response_status = int(status.split(' ', 1)[0]) self.response_headers = dict(headers) def test_trailing_slash_normalization(self): """Tests /v2.0/tokens and /v2.0/tokens/ normalized URLs match.""" req1 = webob.Request.blank('/v2.0/tokens') req2 = webob.Request.blank('/v2.0/tokens/') self.middleware(req1.environ, self.start_fake_response) self.middleware(req2.environ, self.start_fake_response) self.assertEqual(req1.path_url, req2.path_url) self.assertEqual('http://localhost/v2.0/tokens', req1.path_url) def test_rewrite_empty_path(self): """Tests empty path is rewritten to root.""" req = webob.Request.blank('') self.middleware(req.environ, self.start_fake_response) self.assertEqual('http://localhost/', req.path_url) keystone-9.0.0/keystone/tests/unit/identity_mapping.py0000664000567000056710000000154412701407102024420 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from keystone.common import sql from keystone.identity.mapping_backends import sql as mapping_sql def list_id_mappings(): """List all id_mappings for testing purposes.""" with sql.session_for_read() as session: refs = session.query(mapping_sql.IDMapping).all() return [x.to_dict() for x in refs] keystone-9.0.0/keystone/tests/unit/backend/0000775000567000056710000000000012701407246022076 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/__init__.py0000664000567000056710000000000012701407102024164 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/core_sql.py0000664000567000056710000000353512701407102024254 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy from keystone.common import sql from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database class BaseBackendSqlTests(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): super(BaseBackendSqlTests, self).setUp() self.useFixture(database.Database()) self.load_backends() # populate the engine with tables & fixtures self.load_fixtures(default_fixtures) # defaulted by the data load self.user_foo['enabled'] = True def config_files(self): config_files = super(BaseBackendSqlTests, self).config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files class BaseBackendSqlModels(BaseBackendSqlTests): def select_table(self, name): table = sqlalchemy.Table(name, sql.ModelBase.metadata, autoload=True) s = sqlalchemy.select([table]) return s def assertExpectedSchema(self, table, cols): table = self.select_table(table) for col, type_, length in cols: self.assertIsInstance(table.c[col].type, type_) if length: self.assertEqual(length, table.c[col].type.length) keystone-9.0.0/keystone/tests/unit/backend/core_ldap.py0000664000567000056710000001257712701407102024403 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ldap from oslo_config import cfg from keystone.common import cache from keystone.common import ldap as common_ldap from keystone.common.ldap import core as common_ldap_core from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import fakeldap from keystone.tests.unit.ksfixtures import database CONF = cfg.CONF def create_group_container(identity_api): # Create the groups base entry (ou=Groups,cn=example,cn=com) group_api = identity_api.driver.group conn = group_api.get_connection() dn = 'ou=Groups,cn=example,cn=com' conn.add_s(dn, [('objectclass', ['organizationalUnit']), ('ou', ['Groups'])]) class BaseBackendLdapCommon(object): """Mixin class to set up generic LDAP backends.""" def setUp(self): super(BaseBackendLdapCommon, self).setUp() common_ldap.register_handler('fake://', fakeldap.FakeLdap) self.load_backends() self.load_fixtures(default_fixtures) self.addCleanup(common_ldap_core._HANDLERS.clear) self.addCleanup(self.clear_database) def _get_domain_fixture(self): """Domains in LDAP are read-only, so just return the static one.""" return self.resource_api.get_domain(CONF.identity.default_domain_id) def clear_database(self): for shelf in fakeldap.FakeShelves: fakeldap.FakeShelves[shelf].clear() def get_config(self, domain_id): # Only one conf structure unless we are using separate domain backends return CONF def config_overrides(self): super(BaseBackendLdapCommon, self).config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = super(BaseBackendLdapCommon, self).config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files def get_user_enabled_vals(self, user): user_dn = ( self.identity_api.driver.user._id_to_dn_string(user['id'])) enabled_attr_name = CONF.ldap.user_enabled_attribute ldap_ = self.identity_api.driver.user.get_connection() res = ldap_.search_s(user_dn, ldap.SCOPE_BASE, u'(sn=%s)' % user['name']) 
if enabled_attr_name in res[0][1]: return res[0][1][enabled_attr_name] else: return None class BaseBackendLdap(object): """Mixin class to set up an all-LDAP configuration.""" def setUp(self): # NOTE(dstanek): The database must be setup prior to calling the # parent's setUp. The parent's setUp uses services (like # credentials) that require a database. self.useFixture(database.Database()) super(BaseBackendLdap, self).setUp() def load_fixtures(self, fixtures): # Override super impl since need to create group container. create_group_container(self.identity_api) super(BaseBackendLdap, self).load_fixtures(fixtures) class BaseBackendLdapIdentitySqlEverythingElse(unit.SQLDriverOverrides): """Mixin base for Identity LDAP, everything else SQL backend tests.""" def config_files(self): config_files = super(BaseBackendLdapIdentitySqlEverythingElse, self).config_files() config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf')) return config_files def setUp(self): sqldb = self.useFixture(database.Database()) super(BaseBackendLdapIdentitySqlEverythingElse, self).setUp() self.clear_database() self.load_backends() cache.configure_cache() sqldb.recreate() self.load_fixtures(default_fixtures) # defaulted by the data load self.user_foo['enabled'] = True def config_overrides(self): super(BaseBackendLdapIdentitySqlEverythingElse, self).config_overrides() self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config(group='resource', driver='sql') self.config_fixture.config(group='assignment', driver='sql') class BaseBackendLdapIdentitySqlEverythingElseWithMapping(object): """Mixin base class to test mapping of default LDAP backend. The default configuration is not to enable mapping when using a single backend LDAP driver. However, a cloud provider might want to enable the mapping, hence hiding the LDAP IDs from any clients of keystone. Setting backward_compatible_ids to False will enable this mapping. 
""" def config_overrides(self): super(BaseBackendLdapIdentitySqlEverythingElseWithMapping, self).config_overrides() self.config_fixture.config(group='identity_mapping', backward_compatible_ids=False) keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/0000775000567000056710000000000012701407246025100 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/assignment/0000775000567000056710000000000012701407246027250 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/assignment/__init__.py0000664000567000056710000000000012701407102031336 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/assignment/V8/0000775000567000056710000000000012701407246027545 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/assignment/V8/__init__.py0000664000567000056710000000000012701407102031633 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/assignment/V8/sql.py0000664000567000056710000000304712701407102030711 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.tests.unit import test_backend_sql class SqlIdentityV8(test_backend_sql.SqlIdentity): """Test that a V8 driver still passes the same tests. We use the SQL driver as an example of a V8 legacy driver. 
""" def config_overrides(self): super(SqlIdentityV8, self).config_overrides() # V8 SQL specific driver overrides self.config_fixture.config( group='assignment', driver='keystone.assignment.V8_backends.sql.Assignment') self.use_specific_sql_driver_version( 'keystone.assignment', 'backends', 'V8_') def test_delete_project_assignments_same_id_as_domain(self): self.skipTest("V8 doesn't support project acting as a domain.") def test_delete_user_assignments_user_same_id_as_group(self): self.skipTest("Groups and users with the same ID are not supported.") def test_delete_group_assignments_group_same_id_as_user(self): self.skipTest("Groups and users with the same ID are not supported.") keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/resource/0000775000567000056710000000000012701407246026727 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/resource/__init__.py0000664000567000056710000000000012701407102031015 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/resource/V8/0000775000567000056710000000000012701407246027224 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/resource/V8/__init__.py0000664000567000056710000000000012701407102031312 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/resource/V8/sql.py0000664000567000056710000000503312701407102030365 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import unittest from keystone.resource.V8_backends import sql from keystone.tests import unit from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.resource import test_backends from keystone.tests.unit import test_backend_sql class SqlIdentityV8(test_backend_sql.SqlIdentity): """Test that a V8 driver still passes the same tests. We use the SQL driver as an example of a V8 legacy driver. """ def config_overrides(self): super(SqlIdentityV8, self).config_overrides() # V8 SQL specific driver overrides self.config_fixture.config( group='resource', driver='keystone.resource.V8_backends.sql.Resource') self.use_specific_sql_driver_version( 'keystone.resource', 'backends', 'V8_') def test_delete_projects_from_ids(self): self.skipTest('Operation not supported in v8 and earlier drivers') def test_delete_projects_from_ids_with_no_existing_project_id(self): self.skipTest('Operation not supported in v8 and earlier drivers') def test_delete_project_cascade(self): self.skipTest('Operation not supported in v8 and earlier drivers') def test_delete_large_project_cascade(self): self.skipTest('Operation not supported in v8 and earlier drivers') def test_hidden_project_domain_root_is_really_hidden(self): self.skipTest('Operation not supported in v8 and earlier drivers') class TestSqlResourceDriverV8(unit.BaseTestCase, test_backends.ResourceDriverTests): def setUp(self): super(TestSqlResourceDriverV8, self).setUp() version_specifiers = { 'keystone.resource': { 'versionless_backend': 'backends', 'versioned_backend': 'V8_backends' } } self.useFixture(database.Database(version_specifiers)) self.driver = sql.Resource() @unittest.skip('Null domain not allowed.') def test_create_project_null_domain(self): pass keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/role/0000775000567000056710000000000012701407246026041 5ustar 
jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/role/__init__.py0000664000567000056710000000000012701407102030127 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/role/V8/0000775000567000056710000000000012701407246026336 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/role/V8/__init__.py0000664000567000056710000000000012701407102030424 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/role/V8/sql.py0000664000567000056710000000217012701407102027476 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.tests.unit import test_backend_sql class SqlIdentityV8(test_backend_sql.SqlIdentity): """Test that a V8 driver still passes the same tests. We use the SQL driver as an example of a V8 legacy driver. 
""" def config_overrides(self): super(SqlIdentityV8, self).config_overrides() # V8 SQL specific driver overrides self.config_fixture.config( group='role', driver='keystone.assignment.V8_role_backends.sql.Role') self.use_specific_sql_driver_version( 'keystone.assignment', 'role_backends', 'V8_') keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/__init__.py0000664000567000056710000000000012701407102027166 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/federation/0000775000567000056710000000000012701407246027220 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/federation/__init__.py0000664000567000056710000000000012701407102031306 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/federation/V8/0000775000567000056710000000000012701407246027515 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/federation/V8/__init__.py0000664000567000056710000000000012701407102031603 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/backend/legacy_drivers/federation/V8/api_v3.py0000664000567000056710000000774212701407102031251 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from six.moves import http_client from keystone.tests.unit import test_v3_federation class FederatedSetupMixinV8(object): def useV8driver(self): # We use the SQL driver as an example V8 driver, so override # the current driver with that version. self.config_fixture.config( group='federation', driver='keystone.federation.V8_backends.sql.Federation') self.use_specific_sql_driver_version( 'keystone.federation', 'backends', 'V8_') class FederatedIdentityProviderTestsV8( test_v3_federation.FederatedIdentityProviderTests, FederatedSetupMixinV8): """Test that a V8 driver still passes the same tests.""" def config_overrides(self): super(FederatedIdentityProviderTestsV8, self).config_overrides() self.useV8driver() def test_create_idp_remote_repeated(self): """Creates two IdentityProvider entities with some remote_ids A remote_id is the same for both so the second IdP is not created because of the uniqueness of the remote_ids Expect HTTP 409 Conflict code for the latter call. Note: V9 drivers and later augment the conflict message with additional information, which won't be present if we are running a V8 driver - so override the newer tests to just ensure a conflict message is raised. """ body = self.default_body.copy() repeated_remote_id = uuid.uuid4().hex body['remote_ids'] = [uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex, repeated_remote_id] self._create_default_idp(body=body) url = self.base_url(suffix=uuid.uuid4().hex) body['remote_ids'] = [uuid.uuid4().hex, repeated_remote_id] self.put(url, body={'identity_provider': body}, expected_status=http_client.CONFLICT) def test_check_idp_uniqueness(self): """Add same IdP twice. Expect HTTP 409 Conflict code for the latter call. Note: V9 drivers and later augment the conflict message with additional information, which won't be present if we are running a V8 driver - so override the newer tests to just ensure a conflict message is raised. 
""" url = self.base_url(suffix=uuid.uuid4().hex) body = self._http_idp_input() self.put(url, body={'identity_provider': body}, expected_status=http_client.CREATED) self.put(url, body={'identity_provider': body}, expected_status=http_client.CONFLICT) class MappingCRUDTestsV8( test_v3_federation.MappingCRUDTests, FederatedSetupMixinV8): """Test that a V8 driver still passes the same tests.""" def config_overrides(self): super(MappingCRUDTestsV8, self).config_overrides() self.useV8driver() class ServiceProviderTestsV8( test_v3_federation.ServiceProviderTests, FederatedSetupMixinV8): """Test that a V8 driver still passes the same tests.""" def config_overrides(self): super(ServiceProviderTestsV8, self).config_overrides() self.useV8driver() def test_filter_list_sp_by_id(self): self.skipTest('Operation not supported in v8 and earlier drivers') def test_filter_list_sp_by_enabled(self): self.skipTest('Operation not supported in v8 and earlier drivers') keystone-9.0.0/keystone/tests/unit/test_auth_plugin.py0000664000567000056710000001726212701407102024436 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import mock from keystone import auth from keystone import exception from keystone.tests import unit # for testing purposes only METHOD_NAME = 'simple_challenge_response' EXPECTED_RESPONSE = uuid.uuid4().hex DEMO_USER_ID = uuid.uuid4().hex class SimpleChallengeResponse(auth.AuthMethodHandler): def authenticate(self, context, auth_payload, user_context): if 'response' in auth_payload: if auth_payload['response'] != EXPECTED_RESPONSE: raise exception.Unauthorized('Wrong answer') user_context['user_id'] = DEMO_USER_ID else: return {"challenge": "What's the name of your high school?"} class TestAuthPlugin(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): super(TestAuthPlugin, self).setUp() self.load_backends() self.api = auth.controllers.Auth() def config_overrides(self): super(TestAuthPlugin, self).config_overrides() method_opts = { METHOD_NAME: 'keystone.tests.unit.test_auth_plugin.SimpleChallengeResponse', } self.auth_plugin_config_override( methods=['external', 'password', 'token', METHOD_NAME], **method_opts) def test_unsupported_auth_method(self): method_name = uuid.uuid4().hex auth_data = {'methods': [method_name]} auth_data[method_name] = {'test': 'test'} auth_data = {'identity': auth_data} self.assertRaises(exception.AuthMethodNotSupported, auth.controllers.AuthInfo.create, None, auth_data) def test_addition_auth_steps(self): auth_data = {'methods': [METHOD_NAME]} auth_data[METHOD_NAME] = { 'test': 'test'} auth_data = {'identity': auth_data} auth_info = auth.controllers.AuthInfo.create(None, auth_data) auth_context = {'extras': {}, 'method_names': []} try: self.api.authenticate({'environment': {}}, auth_info, auth_context) except exception.AdditionalAuthRequired as e: self.assertIn('methods', e.authentication) self.assertIn(METHOD_NAME, e.authentication['methods']) self.assertIn(METHOD_NAME, e.authentication) self.assertIn('challenge', e.authentication[METHOD_NAME]) # test correct response auth_data = {'methods': [METHOD_NAME]} 
auth_data[METHOD_NAME] = { 'response': EXPECTED_RESPONSE} auth_data = {'identity': auth_data} auth_info = auth.controllers.AuthInfo.create(None, auth_data) auth_context = {'extras': {}, 'method_names': []} self.api.authenticate({'environment': {}}, auth_info, auth_context) self.assertEqual(DEMO_USER_ID, auth_context['user_id']) # test incorrect response auth_data = {'methods': [METHOD_NAME]} auth_data[METHOD_NAME] = { 'response': uuid.uuid4().hex} auth_data = {'identity': auth_data} auth_info = auth.controllers.AuthInfo.create(None, auth_data) auth_context = {'extras': {}, 'method_names': []} self.assertRaises(exception.Unauthorized, self.api.authenticate, {'environment': {}}, auth_info, auth_context) def test_duplicate_method(self): # Having the same method twice doesn't cause load_auth_methods to fail. self.auth_plugin_config_override( methods=['external', 'external']) self.clear_auth_plugin_registry() auth.controllers.load_auth_methods() self.assertIn('external', auth.controllers.AUTH_METHODS) class TestAuthPluginDynamicOptions(TestAuthPlugin): def config_overrides(self): super(TestAuthPluginDynamicOptions, self).config_overrides() # Clear the override for the [auth] ``methods`` option so it is # possible to load the options from the config file. 
self.config_fixture.conf.clear_override('methods', group='auth') def config_files(self): config_files = super(TestAuthPluginDynamicOptions, self).config_files() config_files.append(unit.dirs.tests_conf('test_auth_plugin.conf')) return config_files class TestMapped(unit.TestCase): def setUp(self): super(TestMapped, self).setUp() self.load_backends() self.api = auth.controllers.Auth() def config_files(self): config_files = super(TestMapped, self).config_files() config_files.append(unit.dirs.tests_conf('test_auth_plugin.conf')) return config_files def auth_plugin_config_override(self, methods=None, **method_classes): # Do not apply the auth plugin overrides so that the config file is # tested pass def _test_mapped_invocation_with_method_name(self, method_name): with mock.patch.object(auth.plugins.mapped.Mapped, 'authenticate', return_value=None) as authenticate: context = {'environment': {}} auth_data = { 'identity': { 'methods': [method_name], method_name: {'protocol': method_name}, } } auth_info = auth.controllers.AuthInfo.create(context, auth_data) auth_context = {'extras': {}, 'method_names': [], 'user_id': uuid.uuid4().hex} self.api.authenticate(context, auth_info, auth_context) # make sure Mapped plugin got invoked with the correct payload ((context, auth_payload, auth_context), kwargs) = authenticate.call_args self.assertEqual(method_name, auth_payload['protocol']) def test_mapped_with_remote_user(self): with mock.patch.object(auth.plugins.mapped.Mapped, 'authenticate', return_value=None) as authenticate: # external plugin should fail and pass to mapped plugin method_name = 'saml2' auth_data = {'methods': [method_name]} # put the method name in the payload so its easier to correlate # method name with payload auth_data[method_name] = {'protocol': method_name} auth_data = {'identity': auth_data} auth_info = auth.controllers.AuthInfo.create(None, auth_data) auth_context = {'extras': {}, 'method_names': [], 'user_id': uuid.uuid4().hex} environment = 
{'environment': {'REMOTE_USER': 'foo@idp.com'}} self.api.authenticate(environment, auth_info, auth_context) # make sure Mapped plugin got invoked with the correct payload ((context, auth_payload, auth_context), kwargs) = authenticate.call_args self.assertEqual(method_name, auth_payload['protocol']) def test_supporting_multiple_methods(self): for method_name in ['saml2', 'openid', 'x509']: self._test_mapped_invocation_with_method_name(method_name) keystone-9.0.0/keystone/tests/unit/test_backend_ldap.py0000664000567000056710000043005012701407105024503 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import uuid import ldap import mock from oslo_config import cfg from oslo_log import versionutils from oslotest import mockpatch import pkg_resources from six.moves import http_client from six.moves import range from testtools import matchers from keystone.common import cache from keystone.common import driver_hints from keystone.common import ldap as common_ldap from keystone.common.ldap import core as common_ldap_core from keystone import exception from keystone import identity from keystone.identity.mapping_backends import mapping as map from keystone import resource from keystone.tests import unit from keystone.tests.unit.assignment import test_backends as assignment_tests from keystone.tests.unit import default_fixtures from keystone.tests.unit.identity import test_backends as identity_tests from keystone.tests.unit import identity_mapping as mapping_sql from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.ksfixtures import ldapdb from keystone.tests.unit.resource import test_backends as resource_tests from keystone.tests.unit.utils import wip CONF = cfg.CONF def _assert_backends(testcase, **kwargs): def _get_backend_cls(testcase, subsystem): observed_backend = getattr(testcase, subsystem + '_api').driver return observed_backend.__class__ def _get_domain_specific_backend_cls(manager, domain): observed_backend = manager.domain_configs.get_domain_driver(domain) return observed_backend.__class__ def _get_entrypoint_cls(subsystem, name): entrypoint = entrypoint_map['keystone.' 
+ subsystem][name] return entrypoint.resolve() def _load_domain_specific_configs(manager): if (not manager.domain_configs.configured and CONF.identity.domain_specific_drivers_enabled): manager.domain_configs.setup_domain_drivers( manager.driver, manager.resource_api) def _assert_equal(expected_cls, observed_cls, subsystem, domain=None): msg = ('subsystem %(subsystem)s expected %(expected_cls)r, ' 'but observed %(observed_cls)r') if domain: subsystem = '%s[domain=%s]' % (subsystem, domain) assert expected_cls == observed_cls, msg % { 'expected_cls': expected_cls, 'observed_cls': observed_cls, 'subsystem': subsystem, } env = pkg_resources.Environment() keystone_dist = env['keystone'][0] entrypoint_map = pkg_resources.get_entry_map(keystone_dist) for subsystem, entrypoint_name in kwargs.items(): if isinstance(entrypoint_name, str): observed_cls = _get_backend_cls(testcase, subsystem) expected_cls = _get_entrypoint_cls(subsystem, entrypoint_name) _assert_equal(expected_cls, observed_cls, subsystem) elif isinstance(entrypoint_name, dict): manager = getattr(testcase, subsystem + '_api') _load_domain_specific_configs(manager) for domain, entrypoint_name in entrypoint_name.items(): if domain is None: observed_cls = _get_backend_cls(testcase, subsystem) expected_cls = _get_entrypoint_cls( subsystem, entrypoint_name) _assert_equal(expected_cls, observed_cls, subsystem) continue observed_cls = _get_domain_specific_backend_cls( manager, domain) expected_cls = _get_entrypoint_cls(subsystem, entrypoint_name) _assert_equal(expected_cls, observed_cls, subsystem, domain) else: raise ValueError('%r is not an expected value for entrypoint name' % entrypoint_name) def create_group_container(identity_api): # Create the groups base entry (ou=Groups,cn=example,cn=com) group_api = identity_api.driver.group conn = group_api.get_connection() dn = 'ou=Groups,cn=example,cn=com' conn.add_s(dn, [('objectclass', ['organizationalUnit']), ('ou', ['Groups'])]) class 
BaseLDAPIdentity(identity_tests.IdentityTests, assignment_tests.AssignmentTests, resource_tests.ResourceTests): def setUp(self): super(BaseLDAPIdentity, self).setUp() self.ldapdb = self.useFixture(ldapdb.LDAPDatabase()) self.load_backends() self.load_fixtures(default_fixtures) self.config_fixture.config(group='os_inherit', enabled=False) def _get_domain_fixture(self): """Domains in LDAP are read-only, so just return the static one.""" return self.resource_api.get_domain(CONF.identity.default_domain_id) def get_config(self, domain_id): # Only one conf structure unless we are using separate domain backends return CONF def config_overrides(self): super(BaseLDAPIdentity, self).config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = super(BaseLDAPIdentity, self).config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files def new_user_ref(self, domain_id, project_id=None, **kwargs): ref = unit.new_user_ref(domain_id=domain_id, project_id=project_id, **kwargs) if 'id' not in kwargs: del ref['id'] return ref def get_user_enabled_vals(self, user): user_dn = ( self.identity_api.driver.user._id_to_dn_string(user['id'])) enabled_attr_name = CONF.ldap.user_enabled_attribute ldap_ = self.identity_api.driver.user.get_connection() res = ldap_.search_s(user_dn, ldap.SCOPE_BASE, u'(sn=%s)' % user['name']) if enabled_attr_name in res[0][1]: return res[0][1][enabled_attr_name] else: return None def test_build_tree(self): """Regression test for building the tree names.""" user_api = identity.backends.ldap.UserApi(CONF) self.assertTrue(user_api) self.assertEqual("ou=Users,%s" % CONF.ldap.suffix, user_api.tree_dn) def test_configurable_allowed_user_actions(self): user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) self.identity_api.get_user(user['id']) user['password'] = u'fäképass2' self.identity_api.update_user(user['id'], user) 
self.identity_api.delete_user(user['id']) self.assertRaises(exception.UserNotFound, self.identity_api.get_user, user['id']) def test_configurable_forbidden_user_actions(self): driver = self.identity_api._select_identity_driver( CONF.identity.default_domain_id) driver.user.allow_create = False driver.user.allow_update = False driver.user.allow_delete = False user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ForbiddenAction, self.identity_api.create_user, user) self.user_foo['password'] = u'fäképass2' self.assertRaises(exception.ForbiddenAction, self.identity_api.update_user, self.user_foo['id'], self.user_foo) self.assertRaises(exception.ForbiddenAction, self.identity_api.delete_user, self.user_foo['id']) def test_configurable_forbidden_create_existing_user(self): driver = self.identity_api._select_identity_driver( CONF.identity.default_domain_id) driver.user.allow_create = False self.assertRaises(exception.ForbiddenAction, self.identity_api.create_user, self.user_foo) def test_user_filter(self): user_ref = self.identity_api.get_user(self.user_foo['id']) self.user_foo.pop('password') self.assertDictEqual(self.user_foo, user_ref) driver = self.identity_api._select_identity_driver( user_ref['domain_id']) driver.user.ldap_filter = '(CN=DOES_NOT_MATCH)' # invalidate the cache if the result is cached. 
self.identity_api.get_user.invalidate(self.identity_api, self.user_foo['id']) self.assertRaises(exception.UserNotFound, self.identity_api.get_user, self.user_foo['id']) def test_remove_role_grant_from_user_and_project(self): self.assignment_api.create_grant(user_id=self.user_foo['id'], project_id=self.tenant_baz['id'], role_id='member') roles_ref = self.assignment_api.list_grants( user_id=self.user_foo['id'], project_id=self.tenant_baz['id']) self.assertDictEqual(self.role_member, roles_ref[0]) self.assignment_api.delete_grant(user_id=self.user_foo['id'], project_id=self.tenant_baz['id'], role_id='member') roles_ref = self.assignment_api.list_grants( user_id=self.user_foo['id'], project_id=self.tenant_baz['id']) self.assertEqual(0, len(roles_ref)) self.assertRaises(exception.RoleAssignmentNotFound, self.assignment_api.delete_grant, user_id=self.user_foo['id'], project_id=self.tenant_baz['id'], role_id='member') def test_get_and_remove_role_grant_by_group_and_project(self): new_domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=new_domain['id']) new_group = self.identity_api.create_group(new_group) new_user = self.new_user_ref(domain_id=new_domain['id']) new_user = self.identity_api.create_user(new_user) self.identity_api.add_user_to_group(new_user['id'], new_group['id']) roles_ref = self.assignment_api.list_grants( group_id=new_group['id'], project_id=self.tenant_bar['id']) self.assertEqual([], roles_ref) self.assertEqual(0, len(roles_ref)) self.assignment_api.create_grant(group_id=new_group['id'], project_id=self.tenant_bar['id'], role_id='member') roles_ref = self.assignment_api.list_grants( group_id=new_group['id'], project_id=self.tenant_bar['id']) self.assertNotEmpty(roles_ref) self.assertDictEqual(self.role_member, roles_ref[0]) self.assignment_api.delete_grant(group_id=new_group['id'], project_id=self.tenant_bar['id'], role_id='member') roles_ref = self.assignment_api.list_grants( group_id=new_group['id'], 
project_id=self.tenant_bar['id']) self.assertEqual(0, len(roles_ref)) self.assertRaises(exception.RoleAssignmentNotFound, self.assignment_api.delete_grant, group_id=new_group['id'], project_id=self.tenant_bar['id'], role_id='member') def test_get_and_remove_role_grant_by_group_and_domain(self): # TODO(henry-nash): We should really rewrite the tests in # unit.resource.test_backends to be more flexible as to where the # domains are sourced from, so that we would not need to override such # tests here. This is raised as bug 1373865. new_domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=new_domain['id'],) new_group = self.identity_api.create_group(new_group) new_user = self.new_user_ref(domain_id=new_domain['id']) new_user = self.identity_api.create_user(new_user) self.identity_api.add_user_to_group(new_user['id'], new_group['id']) roles_ref = self.assignment_api.list_grants( group_id=new_group['id'], domain_id=new_domain['id']) self.assertEqual(0, len(roles_ref)) self.assignment_api.create_grant(group_id=new_group['id'], domain_id=new_domain['id'], role_id='member') roles_ref = self.assignment_api.list_grants( group_id=new_group['id'], domain_id=new_domain['id']) self.assertDictEqual(self.role_member, roles_ref[0]) self.assignment_api.delete_grant(group_id=new_group['id'], domain_id=new_domain['id'], role_id='member') roles_ref = self.assignment_api.list_grants( group_id=new_group['id'], domain_id=new_domain['id']) self.assertEqual(0, len(roles_ref)) self.assertRaises(exception.NotFound, self.assignment_api.delete_grant, group_id=new_group['id'], domain_id=new_domain['id'], role_id='member') def test_get_role_assignment_by_domain_not_found(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_del_role_assignment_by_domain_not_found(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_get_and_remove_role_grant_by_user_and_domain(self): self.skipTest('N/A: LDAP does not support multiple domains') 
def test_get_and_remove_correct_role_grant_from_a_mix(self): self.skipTest('Blocked by bug 1101287') def test_get_and_remove_role_grant_by_group_and_cross_domain(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_get_and_remove_role_grant_by_user_and_cross_domain(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_role_grant_by_group_and_cross_domain_project(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_role_grant_by_user_and_cross_domain_project(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_multi_role_grant_by_user_group_on_project_domain(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_delete_role_with_user_and_group_grants(self): self.skipTest('Blocked by bug 1101287') def test_delete_user_with_group_project_domain_links(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_delete_group_with_user_project_domain_links(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_list_role_assignment_containing_names(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_list_projects_for_user(self): domain = self._get_domain_fixture() user1 = self.new_user_ref(domain_id=domain['id']) user1 = self.identity_api.create_user(user1) user_projects = self.assignment_api.list_projects_for_user(user1['id']) self.assertThat(user_projects, matchers.HasLength(0)) # new grant(user1, role_member, tenant_bar) self.assignment_api.create_grant(user_id=user1['id'], project_id=self.tenant_bar['id'], role_id=self.role_member['id']) # new grant(user1, role_member, tenant_baz) self.assignment_api.create_grant(user_id=user1['id'], project_id=self.tenant_baz['id'], role_id=self.role_member['id']) user_projects = self.assignment_api.list_projects_for_user(user1['id']) self.assertThat(user_projects, matchers.HasLength(2)) # Now, check number of projects through groups user2 = 
self.new_user_ref(domain_id=domain['id']) user2 = self.identity_api.create_user(user2) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = self.identity_api.create_group(group1) self.identity_api.add_user_to_group(user2['id'], group1['id']) # new grant(group1(user2), role_member, tenant_bar) self.assignment_api.create_grant(group_id=group1['id'], project_id=self.tenant_bar['id'], role_id=self.role_member['id']) # new grant(group1(user2), role_member, tenant_baz) self.assignment_api.create_grant(group_id=group1['id'], project_id=self.tenant_baz['id'], role_id=self.role_member['id']) user_projects = self.assignment_api.list_projects_for_user(user2['id']) self.assertThat(user_projects, matchers.HasLength(2)) # new grant(group1(user2), role_other, tenant_bar) self.assignment_api.create_grant(group_id=group1['id'], project_id=self.tenant_bar['id'], role_id=self.role_other['id']) user_projects = self.assignment_api.list_projects_for_user(user2['id']) self.assertThat(user_projects, matchers.HasLength(2)) def test_list_projects_for_user_and_groups(self): domain = self._get_domain_fixture() # Create user1 user1 = self.new_user_ref(domain_id=domain['id']) user1 = self.identity_api.create_user(user1) # Create new group for user1 group1 = unit.new_group_ref(domain_id=domain['id']) group1 = self.identity_api.create_group(group1) # Add user1 to group1 self.identity_api.add_user_to_group(user1['id'], group1['id']) # Now, add grant to user1 and group1 in tenant_bar self.assignment_api.create_grant(user_id=user1['id'], project_id=self.tenant_bar['id'], role_id=self.role_member['id']) self.assignment_api.create_grant(group_id=group1['id'], project_id=self.tenant_bar['id'], role_id=self.role_member['id']) # The result is user1 has only one project granted user_projects = self.assignment_api.list_projects_for_user(user1['id']) self.assertThat(user_projects, matchers.HasLength(1)) # Now, delete user1 grant into tenant_bar and check 
self.assignment_api.delete_grant(user_id=user1['id'], project_id=self.tenant_bar['id'], role_id=self.role_member['id']) # The result is user1 has only one project granted. # Granted through group1. user_projects = self.assignment_api.list_projects_for_user(user1['id']) self.assertThat(user_projects, matchers.HasLength(1)) def test_list_projects_for_user_with_grants(self): domain = self._get_domain_fixture() new_user = self.new_user_ref(domain_id=domain['id']) new_user = self.identity_api.create_user(new_user) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = self.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domain['id']) group2 = self.identity_api.create_group(group2) project1 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) self.resource_api.create_project(project2['id'], project2) self.identity_api.add_user_to_group(new_user['id'], group1['id']) self.identity_api.add_user_to_group(new_user['id'], group2['id']) self.assignment_api.create_grant(user_id=new_user['id'], project_id=self.tenant_bar['id'], role_id=self.role_member['id']) self.assignment_api.create_grant(user_id=new_user['id'], project_id=project1['id'], role_id=self.role_admin['id']) self.assignment_api.create_grant(group_id=group2['id'], project_id=project2['id'], role_id=self.role_admin['id']) user_projects = self.assignment_api.list_projects_for_user( new_user['id']) self.assertEqual(3, len(user_projects)) def test_create_duplicate_user_name_in_different_domains(self): self.skipTest('Domains are read-only against LDAP') def test_create_duplicate_project_name_in_different_domains(self): self.skipTest('Domains are read-only against LDAP') def test_create_duplicate_group_name_in_different_domains(self): self.skipTest( 'N/A: LDAP does not support multiple domains') def test_move_user_between_domains(self): self.skipTest('Domains are read-only against 
LDAP') def test_move_user_between_domains_with_clashing_names_fails(self): self.skipTest('Domains are read-only against LDAP') def test_move_group_between_domains(self): self.skipTest( 'N/A: LDAP does not support multiple domains') def test_move_group_between_domains_with_clashing_names_fails(self): self.skipTest('Domains are read-only against LDAP') def test_move_project_between_domains(self): self.skipTest('Domains are read-only against LDAP') def test_move_project_between_domains_with_clashing_names_fails(self): self.skipTest('Domains are read-only against LDAP') def test_get_roles_for_user_and_domain(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_get_roles_for_groups_on_domain(self): self.skipTest('Blocked by bug: 1390125') def test_get_roles_for_groups_on_project(self): self.skipTest('Blocked by bug: 1390125') def test_list_domains_for_groups(self): self.skipTest('N/A: LDAP does not support multiple domains') def test_list_projects_for_groups(self): self.skipTest('Blocked by bug: 1390125') def test_domain_delete_hierarchy(self): self.skipTest('Domains are read-only against LDAP') def test_list_role_assignments_unfiltered(self): new_domain = self._get_domain_fixture() new_user = self.new_user_ref(domain_id=new_domain['id']) new_user = self.identity_api.create_user(new_user) new_group = unit.new_group_ref(domain_id=new_domain['id']) new_group = self.identity_api.create_group(new_group) new_project = unit.new_project_ref(domain_id=new_domain['id']) self.resource_api.create_project(new_project['id'], new_project) # First check how many role grant already exist existing_assignments = len(self.assignment_api.list_role_assignments()) self.assignment_api.create_grant(user_id=new_user['id'], project_id=new_project['id'], role_id='other') self.assignment_api.create_grant(group_id=new_group['id'], project_id=new_project['id'], role_id='admin') # Read back the list of assignments - check it is gone up by 2 after_assignments = 
len(self.assignment_api.list_role_assignments()) self.assertEqual(existing_assignments + 2, after_assignments) def test_list_role_assignments_dumb_member(self): self.config_fixture.config(group='ldap', use_dumb_member=True) self.ldapdb.clear() self.load_backends() self.load_fixtures(default_fixtures) new_domain = self._get_domain_fixture() new_user = self.new_user_ref(domain_id=new_domain['id']) new_user = self.identity_api.create_user(new_user) new_project = unit.new_project_ref(domain_id=new_domain['id']) self.resource_api.create_project(new_project['id'], new_project) self.assignment_api.create_grant(user_id=new_user['id'], project_id=new_project['id'], role_id='other') # Read back the list of assignments and ensure # that the LDAP dumb member isn't listed. assignment_ids = [a['user_id'] for a in self.assignment_api.list_role_assignments()] dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member) self.assertNotIn(dumb_id, assignment_ids) def test_list_user_ids_for_project_dumb_member(self): self.config_fixture.config(group='ldap', use_dumb_member=True) self.ldapdb.clear() self.load_backends() self.load_fixtures(default_fixtures) user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) self.assignment_api.add_user_to_project(self.tenant_baz['id'], user['id']) user_ids = self.assignment_api.list_user_ids_for_project( self.tenant_baz['id']) self.assertIn(user['id'], user_ids) dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member) self.assertNotIn(dumb_id, user_ids) def test_multi_group_grants_on_project_domain(self): self.skipTest('Blocked by bug 1101287') def test_list_group_members_missing_entry(self): """List group members with deleted user. If a group has a deleted entry for a member, the non-deleted members are returned. 
""" # Create a group group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group_id = self.identity_api.create_group(group)['id'] # Create a couple of users and add them to the group. user = dict(name=uuid.uuid4().hex, domain_id=CONF.identity.default_domain_id) user_1_id = self.identity_api.create_user(user)['id'] self.identity_api.add_user_to_group(user_1_id, group_id) user = dict(name=uuid.uuid4().hex, domain_id=CONF.identity.default_domain_id) user_2_id = self.identity_api.create_user(user)['id'] self.identity_api.add_user_to_group(user_2_id, group_id) # Delete user 2 # NOTE(blk-u): need to go directly to user interface to keep from # updating the group. unused, driver, entity_id = ( self.identity_api._get_domain_driver_and_entity_id(user_2_id)) driver.user.delete(entity_id) # List group users and verify only user 1. res = self.identity_api.list_users_in_group(group_id) self.assertEqual(1, len(res), "Expected 1 entry (user_1)") self.assertEqual(user_1_id, res[0]['id'], "Expected user 1 id") def test_list_group_members_when_no_members(self): # List group members when there is no member in the group. # No exception should be raised. group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) # If this doesn't raise, then the test is successful. 
self.identity_api.list_users_in_group(group['id']) def test_list_group_members_dumb_member(self): self.config_fixture.config(group='ldap', use_dumb_member=True) self.ldapdb.clear() self.load_backends() self.load_fixtures(default_fixtures) # Create a group group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group_id = self.identity_api.create_group(group)['id'] # Create a user user = dict(name=uuid.uuid4().hex, domain_id=CONF.identity.default_domain_id) user_id = self.identity_api.create_user(user)['id'] # Add user to the group self.identity_api.add_user_to_group(user_id, group_id) user_ids = self.identity_api.list_users_in_group(group_id) dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member) self.assertNotIn(dumb_id, user_ids) def test_list_domains(self): # We have more domains here than the parent class, check for the # correct number of domains for the multildap backend configs domain1 = unit.new_domain_ref() domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) self.resource_api.create_domain(domain2['id'], domain2) domains = self.resource_api.list_domains() self.assertEqual(7, len(domains)) domain_ids = [] for domain in domains: domain_ids.append(domain.get('id')) self.assertIn(CONF.identity.default_domain_id, domain_ids) self.assertIn(domain1['id'], domain_ids) self.assertIn(domain2['id'], domain_ids) def test_authenticate_requires_simple_bind(self): user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) self.assignment_api.add_user_to_project(self.tenant_baz['id'], user['id']) driver = self.identity_api._select_identity_driver( user['domain_id']) driver.user.LDAP_USER = None driver.user.LDAP_PASSWORD = None self.assertRaises(AssertionError, self.identity_api.authenticate, context={}, user_id=user['id'], password=None) # The group and domain CRUD tests below override the standard ones in # unit.identity.test_backends.py so that we can exclude 
the update name # test, since we do not (and will not) support the update of either group # or domain names with LDAP. In the tests below, the update is tested by # updating description. @mock.patch.object(versionutils, 'report_deprecated_feature') def test_group_crud(self, mock_deprecator): # NOTE(stevemar): As of the Mitaka release, we now check for calls that # the LDAP write functionality has been deprecated. group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) args, _kwargs = mock_deprecator.call_args self.assertIn("create_group for the LDAP identity backend", args[1]) group_ref = self.identity_api.get_group(group['id']) self.assertDictEqual(group, group_ref) group['description'] = uuid.uuid4().hex self.identity_api.update_group(group['id'], group) args, _kwargs = mock_deprecator.call_args self.assertIn("update_group for the LDAP identity backend", args[1]) group_ref = self.identity_api.get_group(group['id']) self.assertDictEqual(group, group_ref) self.identity_api.delete_group(group['id']) args, _kwargs = mock_deprecator.call_args self.assertIn("delete_group for the LDAP identity backend", args[1]) self.assertRaises(exception.GroupNotFound, self.identity_api.get_group, group['id']) @mock.patch.object(versionutils, 'report_deprecated_feature') def test_add_remove_user_group_deprecated(self, mock_deprecator): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) self.identity_api.add_user_to_group(user['id'], group['id']) args, _kwargs = mock_deprecator.call_args self.assertIn("add_user_to_group for the LDAP identity", args[1]) self.identity_api.remove_user_from_group(user['id'], group['id']) args, _kwargs = mock_deprecator.call_args self.assertIn("remove_user_from_group for the LDAP identity", args[1]) 
@unit.skip_if_cache_disabled('identity') def test_cache_layer_group_crud(self): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) # cache the result group_ref = self.identity_api.get_group(group['id']) # delete the group bypassing identity api. domain_id, driver, entity_id = ( self.identity_api._get_domain_driver_and_entity_id(group['id'])) driver.delete_group(entity_id) self.assertEqual(group_ref, self.identity_api.get_group(group['id'])) self.identity_api.get_group.invalidate(self.identity_api, group['id']) self.assertRaises(exception.GroupNotFound, self.identity_api.get_group, group['id']) group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) # cache the result self.identity_api.get_group(group['id']) group['description'] = uuid.uuid4().hex group_ref = self.identity_api.update_group(group['id'], group) self.assertDictContainsSubset(self.identity_api.get_group(group['id']), group_ref) def test_create_user_none_mapping(self): # When create a user where an attribute maps to None, the entry is # created without that attribute and it doesn't fail with a TypeError. driver = self.identity_api._select_identity_driver( CONF.identity.default_domain_id) driver.user.attribute_ignore = ['enabled', 'email', 'tenants', 'tenantId'] user = self.new_user_ref(domain_id=CONF.identity.default_domain_id, project_id='maps_to_none') # If this doesn't raise, then the test is successful. user = self.identity_api.create_user(user) def test_create_user_with_boolean_string_names(self): # Ensure that any attribute that is equal to the string 'TRUE' # or 'FALSE' will not be converted to a boolean value, it # should be returned as is. 
boolean_strings = ['TRUE', 'FALSE', 'true', 'false', 'True', 'False', 'TrUe' 'FaLse'] for name in boolean_strings: user = self.new_user_ref(name=name, domain_id=CONF.identity.default_domain_id) user_ref = self.identity_api.create_user(user) user_info = self.identity_api.get_user(user_ref['id']) self.assertEqual(name, user_info['name']) # Delete the user to ensure that the Keystone uniqueness # requirements combined with the case-insensitive nature of a # typical LDAP schema does not cause subsequent names in # boolean_strings to clash. self.identity_api.delete_user(user_ref['id']) def test_unignored_user_none_mapping(self): # Ensure that an attribute that maps to None that is not explicitly # ignored in configuration is implicitly ignored without triggering # an error. driver = self.identity_api._select_identity_driver( CONF.identity.default_domain_id) driver.user.attribute_ignore = ['enabled', 'email', 'tenants', 'tenantId'] user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user_ref = self.identity_api.create_user(user) # If this doesn't raise, then the test is successful. self.identity_api.get_user(user_ref['id']) def test_update_user_name(self): """A user's name cannot be changed through the LDAP driver.""" self.assertRaises(exception.Conflict, super(BaseLDAPIdentity, self).test_update_user_name) def test_arbitrary_attributes_are_returned_from_get_user(self): self.skipTest("Using arbitrary attributes doesn't work under LDAP") def test_new_arbitrary_attributes_are_returned_from_update_user(self): self.skipTest("Using arbitrary attributes doesn't work under LDAP") def test_updated_arbitrary_attributes_are_returned_from_update_user(self): self.skipTest("Using arbitrary attributes doesn't work under LDAP") def test_cache_layer_domain_crud(self): # TODO(morganfainberg): This also needs to be removed when full LDAP # implementation is submitted. No need to duplicate the above test, # just skip this time. 
self.skipTest('Domains are read-only against LDAP') def test_user_id_comma(self): """Even if the user has a , in their ID, groups can be listed.""" # Create a user with a , in their ID # NOTE(blk-u): the DN for this user is hard-coded in fakeldap! # Since we want to fake up this special ID, we'll squirt this # direct into the driver and bypass the manager layer. user_id = u'Doe, John' user = self.new_user_ref(id=user_id, domain_id=CONF.identity.default_domain_id) user = self.identity_api.driver.create_user(user_id, user) # Now we'll use the manager to discover it, which will create a # Public ID for it. ref_list = self.identity_api.list_users() public_user_id = None for ref in ref_list: if ref['name'] == user['name']: public_user_id = ref['id'] break # Create a group group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group_id = group['id'] group = self.identity_api.driver.create_group(group_id, group) # Now we'll use the manager to discover it, which will create a # Public ID for it. ref_list = self.identity_api.list_groups() public_group_id = None for ref in ref_list: if ref['name'] == group['name']: public_group_id = ref['id'] break # Put the user in the group self.identity_api.add_user_to_group(public_user_id, public_group_id) # List groups for user. ref_list = self.identity_api.list_groups_for_user(public_user_id) group['id'] = public_group_id self.assertThat(ref_list, matchers.Equals([group])) def test_user_id_comma_grants(self): """List user and group grants, even with a comma in the user's ID.""" # Create a user with a , in their ID # NOTE(blk-u): the DN for this user is hard-coded in fakeldap! 
# Since we want to fake up this special ID, we'll squirt this # direct into the driver and bypass the manager layer user_id = u'Doe, John' user = self.new_user_ref(id=user_id, domain_id=CONF.identity.default_domain_id) self.identity_api.driver.create_user(user_id, user) # Now we'll use the manager to discover it, which will create a # Public ID for it. ref_list = self.identity_api.list_users() public_user_id = None for ref in ref_list: if ref['name'] == user['name']: public_user_id = ref['id'] break # Grant the user a role on a project. role_id = 'member' project_id = self.tenant_baz['id'] self.assignment_api.create_grant(role_id, user_id=public_user_id, project_id=project_id) role_ref = self.assignment_api.get_grant(role_id, user_id=public_user_id, project_id=project_id) self.assertEqual(role_id, role_ref['id']) def test_user_enabled_ignored_disable_error(self): # When the server is configured so that the enabled attribute is # ignored for users, users cannot be disabled. self.config_fixture.config(group='ldap', user_attribute_ignore=['enabled']) # Need to re-load backends for the config change to take effect. self.load_backends() # Attempt to disable the user. self.assertRaises(exception.ForbiddenAction, self.identity_api.update_user, self.user_foo['id'], {'enabled': False}) user_info = self.identity_api.get_user(self.user_foo['id']) # If 'enabled' is ignored then 'enabled' isn't returned as part of the # ref. self.assertNotIn('enabled', user_info) def test_group_enabled_ignored_disable_error(self): # When the server is configured so that the enabled attribute is # ignored for groups, groups cannot be disabled. self.config_fixture.config(group='ldap', group_attribute_ignore=['enabled']) # Need to re-load backends for the config change to take effect. self.load_backends() # There's no group fixture so create a group. 
new_domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=new_domain['id']) new_group = self.identity_api.create_group(new_group) # Attempt to disable the group. self.assertRaises(exception.ForbiddenAction, self.identity_api.update_group, new_group['id'], {'enabled': False}) group_info = self.identity_api.get_group(new_group['id']) # If 'enabled' is ignored then 'enabled' isn't returned as part of the # ref. self.assertNotIn('enabled', group_info) def test_project_enabled_ignored_disable_error(self): self.skipTest('Resource LDAP has been removed') def test_list_role_assignment_by_domain(self): """Multiple domain assignments are not supported.""" self.assertRaises( (exception.Forbidden, exception.DomainNotFound, exception.ValidationError), super(BaseLDAPIdentity, self).test_list_role_assignment_by_domain) def test_list_role_assignment_by_user_with_domain_group_roles(self): """Multiple domain assignments are not supported.""" self.assertRaises( (exception.Forbidden, exception.DomainNotFound, exception.ValidationError), super(BaseLDAPIdentity, self). test_list_role_assignment_by_user_with_domain_group_roles) def test_domain_crud(self): self.skipTest('Resource LDAP has been removed') def test_list_role_assignment_using_sourced_groups_with_domains(self): """Multiple domain assignments are not supported.""" self.assertRaises( (exception.Forbidden, exception.ValidationError, exception.DomainNotFound), super(BaseLDAPIdentity, self). test_list_role_assignment_using_sourced_groups_with_domains) def test_create_project_with_domain_id_and_without_parent_id(self): """Multiple domains are not supported.""" self.assertRaises( exception.ValidationError, super(BaseLDAPIdentity, self). test_create_project_with_domain_id_and_without_parent_id) def test_create_project_with_domain_id_mismatch_to_parent_domain(self): """Multiple domains are not supported.""" self.assertRaises( exception.ValidationError, super(BaseLDAPIdentity, self). 
test_create_project_with_domain_id_mismatch_to_parent_domain) def test_remove_foreign_assignments_when_deleting_a_domain(self): """Multiple domains are not supported.""" self.assertRaises( (exception.ValidationError, exception.DomainNotFound), super(BaseLDAPIdentity, self).test_remove_foreign_assignments_when_deleting_a_domain) class LDAPIdentity(BaseLDAPIdentity, unit.TestCase): def setUp(self): # NOTE(dstanek): The database must be setup prior to calling the # parent's setUp. The parent's setUp uses services (like # credentials) that require a database. self.useFixture(database.Database()) super(LDAPIdentity, self).setUp() _assert_backends(self, assignment='sql', identity='ldap', resource='sql') def load_fixtures(self, fixtures): # Override super impl since need to create group container. create_group_container(self.identity_api) super(LDAPIdentity, self).load_fixtures(fixtures) def test_list_domains(self): domains = self.resource_api.list_domains() self.assertEqual([resource.calc_default_domain()], domains) def test_configurable_allowed_project_actions(self): domain = self._get_domain_fixture() project = unit.new_project_ref(domain_id=domain['id']) project = self.resource_api.create_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertEqual(project['id'], project_ref['id']) project['enabled'] = False self.resource_api.update_project(project['id'], project) self.resource_api.delete_project(project['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project['id']) def test_configurable_subtree_delete(self): self.config_fixture.config(group='ldap', allow_subtree_delete=True) self.load_backends() project1 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project1['id'], project1) role1 = unit.new_role_ref() self.role_api.create_role(role1['id'], role1) user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user1 = 
self.identity_api.create_user(user1) self.assignment_api.add_role_to_user_and_project( user_id=user1['id'], tenant_id=project1['id'], role_id=role1['id']) self.resource_api.delete_project(project1['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project1['id']) self.resource_api.create_project(project1['id'], project1) list = self.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id']) self.assertEqual(0, len(list)) def test_configurable_forbidden_project_actions(self): self.skipTest('Resource LDAP has been removed') def test_project_filter(self): self.skipTest('Resource LDAP has been removed') def test_dumb_member(self): self.config_fixture.config(group='ldap', use_dumb_member=True) self.ldapdb.clear() self.load_backends() self.load_fixtures(default_fixtures) dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member) self.assertRaises(exception.UserNotFound, self.identity_api.get_user, dumb_id) def test_project_attribute_mapping(self): self.skipTest('Resource LDAP has been removed') def test_project_attribute_ignore(self): self.skipTest('Resource LDAP has been removed') def test_user_enable_attribute_mask(self): self.config_fixture.config(group='ldap', user_enabled_mask=2, user_enabled_default='512') self.ldapdb.clear() self.load_backends() self.load_fixtures(default_fixtures) user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user_ref = self.identity_api.create_user(user) # Use assertIs rather than assertTrue because assertIs will assert the # value is a Boolean as expected. 
self.assertIs(user_ref['enabled'], True) self.assertNotIn('enabled_nomask', user_ref) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([512], enabled_vals) user_ref = self.identity_api.get_user(user_ref['id']) self.assertIs(user_ref['enabled'], True) self.assertNotIn('enabled_nomask', user_ref) user['enabled'] = False user_ref = self.identity_api.update_user(user_ref['id'], user) self.assertIs(user_ref['enabled'], False) self.assertNotIn('enabled_nomask', user_ref) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([514], enabled_vals) user_ref = self.identity_api.get_user(user_ref['id']) self.assertIs(user_ref['enabled'], False) self.assertNotIn('enabled_nomask', user_ref) user['enabled'] = True user_ref = self.identity_api.update_user(user_ref['id'], user) self.assertIs(user_ref['enabled'], True) self.assertNotIn('enabled_nomask', user_ref) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([512], enabled_vals) user_ref = self.identity_api.get_user(user_ref['id']) self.assertIs(user_ref['enabled'], True) self.assertNotIn('enabled_nomask', user_ref) def test_user_enabled_invert(self): self.config_fixture.config(group='ldap', user_enabled_invert=True, user_enabled_default=False) self.ldapdb.clear() self.load_backends() self.load_fixtures(default_fixtures) user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user2 = self.new_user_ref(enabled=False, domain_id=CONF.identity.default_domain_id) user3 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) # Ensure that the LDAP attribute is False for a newly created # enabled user. user_ref = self.identity_api.create_user(user1) self.assertIs(True, user_ref['enabled']) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([False], enabled_vals) user_ref = self.identity_api.get_user(user_ref['id']) self.assertIs(True, user_ref['enabled']) # Ensure that the LDAP attribute is True for a disabled user. 
user1['enabled'] = False user_ref = self.identity_api.update_user(user_ref['id'], user1) self.assertIs(False, user_ref['enabled']) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([True], enabled_vals) # Enable the user and ensure that the LDAP attribute is True again. user1['enabled'] = True user_ref = self.identity_api.update_user(user_ref['id'], user1) self.assertIs(True, user_ref['enabled']) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([False], enabled_vals) # Ensure that the LDAP attribute is True for a newly created # disabled user. user_ref = self.identity_api.create_user(user2) self.assertIs(False, user_ref['enabled']) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([True], enabled_vals) user_ref = self.identity_api.get_user(user_ref['id']) self.assertIs(False, user_ref['enabled']) # Ensure that the LDAP attribute is inverted for a newly created # user when the user_enabled_default setting is used. user_ref = self.identity_api.create_user(user3) self.assertIs(True, user_ref['enabled']) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([False], enabled_vals) user_ref = self.identity_api.get_user(user_ref['id']) self.assertIs(True, user_ref['enabled']) @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get') def test_user_enabled_invert_no_enabled_value(self, mock_ldap_get): self.config_fixture.config(group='ldap', user_enabled_invert=True, user_enabled_default=False) # Mock the search results to return an entry with # no enabled value. mock_ldap_get.return_value = ( 'cn=junk,dc=example,dc=com', { 'sn': [uuid.uuid4().hex], 'email': [uuid.uuid4().hex], 'cn': ['junk'] } ) user_api = identity.backends.ldap.UserApi(CONF) user_ref = user_api.get('junk') # Ensure that the model enabled attribute is inverted # from the resource default. 
self.assertIs(not CONF.ldap.user_enabled_default, user_ref['enabled']) @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get') def test_user_enabled_invert_default_str_value(self, mock_ldap_get): self.config_fixture.config(group='ldap', user_enabled_invert=True, user_enabled_default='False') # Mock the search results to return an entry with # no enabled value. mock_ldap_get.return_value = ( 'cn=junk,dc=example,dc=com', { 'sn': [uuid.uuid4().hex], 'email': [uuid.uuid4().hex], 'cn': ['junk'] } ) user_api = identity.backends.ldap.UserApi(CONF) user_ref = user_api.get('junk') # Ensure that the model enabled attribute is inverted # from the resource default. self.assertIs(True, user_ref['enabled']) @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get') def test_user_enabled_attribute_handles_expired(self, mock_ldap_get): # If using 'passwordisexpired' as enabled attribute, and inverting it, # Then an unauthorized user (expired password) should not be enabled. self.config_fixture.config(group='ldap', user_enabled_invert=True, user_enabled_attribute='passwordisexpired') mock_ldap_get.return_value = ( u'uid=123456789,c=us,ou=our_ldap,o=acme.com', { 'uid': [123456789], 'mail': ['shaun@acme.com'], 'passwordisexpired': ['TRUE'], 'cn': ['uid=123456789,c=us,ou=our_ldap,o=acme.com'] } ) user_api = identity.backends.ldap.UserApi(CONF) user_ref = user_api.get('123456789') self.assertIs(False, user_ref['enabled']) @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get') def test_user_enabled_attribute_handles_utf8(self, mock_ldap_get): # If using 'passwordisexpired' as enabled attribute, and inverting it, # and the result is utf8 encoded, then the an authorized user should # be enabled. 
self.config_fixture.config(group='ldap', user_enabled_invert=True, user_enabled_attribute='passwordisexpired') mock_ldap_get.return_value = ( u'uid=123456789,c=us,ou=our_ldap,o=acme.com', { 'uid': [123456789], 'mail': [u'shaun@acme.com'], 'passwordisexpired': [u'false'], 'cn': [u'uid=123456789,c=us,ou=our_ldap,o=acme.com'] } ) user_api = identity.backends.ldap.UserApi(CONF) user_ref = user_api.get('123456789') self.assertIs(True, user_ref['enabled']) @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'simple_bind_s') def test_user_api_get_connection_no_user_password(self, mocked_method): """Don't bind in case the user and password are blank.""" # Ensure the username/password are in-fact blank self.config_fixture.config(group='ldap', user=None, password=None) user_api = identity.backends.ldap.UserApi(CONF) user_api.get_connection(user=None, password=None) self.assertFalse(mocked_method.called, msg='`simple_bind_s` method was unexpectedly called') @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect') def test_chase_referrals_off(self, mocked_fakeldap): self.config_fixture.config( group='ldap', url='fake://memory', chase_referrals=False) user_api = identity.backends.ldap.UserApi(CONF) user_api.get_connection(user=None, password=None) # The last call_arg should be a dictionary and should contain # chase_referrals. Check to make sure the value of chase_referrals # is as expected. self.assertFalse(mocked_fakeldap.call_args[-1]['chase_referrals']) @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect') def test_chase_referrals_on(self, mocked_fakeldap): self.config_fixture.config( group='ldap', url='fake://memory', chase_referrals=True) user_api = identity.backends.ldap.UserApi(CONF) user_api.get_connection(user=None, password=None) # The last call_arg should be a dictionary and should contain # chase_referrals. Check to make sure the value of chase_referrals # is as expected. 
self.assertTrue(mocked_fakeldap.call_args[-1]['chase_referrals']) @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect') def test_debug_level_set(self, mocked_fakeldap): level = 12345 self.config_fixture.config( group='ldap', url='fake://memory', debug_level=level) user_api = identity.backends.ldap.UserApi(CONF) user_api.get_connection(user=None, password=None) # The last call_arg should be a dictionary and should contain # debug_level. Check to make sure the value of debug_level # is as expected. self.assertEqual(level, mocked_fakeldap.call_args[-1]['debug_level']) def test_wrong_ldap_scope(self): self.config_fixture.config(group='ldap', query_scope=uuid.uuid4().hex) self.assertRaisesRegexp( ValueError, 'Invalid LDAP scope: %s. *' % CONF.ldap.query_scope, identity.backends.ldap.Identity) def test_wrong_alias_dereferencing(self): self.config_fixture.config(group='ldap', alias_dereferencing=uuid.uuid4().hex) self.assertRaisesRegexp( ValueError, 'Invalid LDAP deref option: %s\.' 
% CONF.ldap.alias_dereferencing, identity.backends.ldap.Identity) def test_is_dumb_member(self): self.config_fixture.config(group='ldap', use_dumb_member=True) self.load_backends() dn = 'cn=dumb,dc=nonexistent' self.assertTrue(self.identity_api.driver.user._is_dumb_member(dn)) def test_is_dumb_member_upper_case_keys(self): self.config_fixture.config(group='ldap', use_dumb_member=True) self.load_backends() dn = 'CN=dumb,DC=nonexistent' self.assertTrue(self.identity_api.driver.user._is_dumb_member(dn)) def test_is_dumb_member_with_false_use_dumb_member(self): self.config_fixture.config(group='ldap', use_dumb_member=False) self.load_backends() dn = 'cn=dumb,dc=nonexistent' self.assertFalse(self.identity_api.driver.user._is_dumb_member(dn)) def test_is_dumb_member_not_dumb(self): self.config_fixture.config(group='ldap', use_dumb_member=True) self.load_backends() dn = 'ou=some,dc=example.com' self.assertFalse(self.identity_api.driver.user._is_dumb_member(dn)) def test_user_extra_attribute_mapping(self): self.config_fixture.config( group='ldap', user_additional_attribute_mapping=['description:name']) self.load_backends() user = self.new_user_ref(name='EXTRA_ATTRIBUTES', password='extra', domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) dn, attrs = self.identity_api.driver.user._ldap_get(user['id']) self.assertThat([user['name']], matchers.Equals(attrs['description'])) def test_user_description_attribute_mapping(self): self.config_fixture.config( group='ldap', user_description_attribute='displayName') self.load_backends() user = self.new_user_ref(domain_id=CONF.identity.default_domain_id, displayName=uuid.uuid4().hex) description = user['displayName'] user = self.identity_api.create_user(user) res = self.identity_api.driver.user.get_all() new_user = [u for u in res if u['id'] == user['id']][0] self.assertThat(new_user['description'], matchers.Equals(description)) def test_user_extra_attribute_mapping_description_is_returned(self): # 
Given a mapping like description:description, the description is # returned. self.config_fixture.config( group='ldap', user_additional_attribute_mapping=['description:description']) self.load_backends() user = self.new_user_ref(domain_id=CONF.identity.default_domain_id, description=uuid.uuid4().hex) description = user['description'] user = self.identity_api.create_user(user) res = self.identity_api.driver.user.get_all() new_user = [u for u in res if u['id'] == user['id']][0] self.assertThat(new_user['description'], matchers.Equals(description)) def test_user_with_missing_id(self): # create a user that doesn't have the id attribute ldap_ = self.identity_api.driver.user.get_connection() # `sn` is used for the attribute in the DN because it's allowed by # the entry's objectclasses so that this test could conceivably run in # the live tests. ldap_id_field = 'sn' ldap_id_value = uuid.uuid4().hex dn = '%s=%s,ou=Users,cn=example,cn=com' % (ldap_id_field, ldap_id_value) modlist = [('objectClass', ['person', 'inetOrgPerson']), (ldap_id_field, [ldap_id_value]), ('mail', ['email@example.com']), ('userPassword', [uuid.uuid4().hex])] ldap_.add_s(dn, modlist) # make sure the user doesn't break other users users = self.identity_api.driver.user.get_all() self.assertThat(users, matchers.HasLength(len(default_fixtures.USERS))) @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get') def test_user_mixed_case_attribute(self, mock_ldap_get): # Mock the search results to return attribute names # with unexpected case. 
mock_ldap_get.return_value = ( 'cn=junk,dc=example,dc=com', { 'sN': [uuid.uuid4().hex], 'MaIl': [uuid.uuid4().hex], 'cn': ['junk'] } ) user = self.identity_api.get_user('junk') self.assertEqual(mock_ldap_get.return_value[1]['sN'][0], user['name']) self.assertEqual(mock_ldap_get.return_value[1]['MaIl'][0], user['email']) def test_parse_extra_attribute_mapping(self): option_list = ['description:name', 'gecos:password', 'fake:invalid', 'invalid1', 'invalid2:', 'description:name:something'] mapping = self.identity_api.driver.user._parse_extra_attrs(option_list) expected_dict = {'description': 'name', 'gecos': 'password', 'fake': 'invalid', 'invalid2': ''} self.assertDictEqual(expected_dict, mapping) def test_create_domain(self): domain = unit.new_domain_ref() self.assertRaises(exception.ValidationError, self.resource_api.create_domain, domain['id'], domain) @unit.skip_if_no_multiple_domains_support def test_create_domain_case_sensitivity(self): # domains are read-only, so case sensitivity isn't an issue ref = unit.new_domain_ref() self.assertRaises(exception.Forbidden, self.resource_api.create_domain, ref['id'], ref) def test_cache_layer_domain_crud(self): # TODO(morganfainberg): This also needs to be removed when full LDAP # implementation is submitted. No need to duplicate the above test, # just skip this time. 
self.skipTest('Domains are read-only against LDAP') def test_domain_rename_invalidates_get_domain_by_name_cache(self): parent = super(LDAPIdentity, self) self.assertRaises( exception.Forbidden, parent.test_domain_rename_invalidates_get_domain_by_name_cache) def test_project_rename_invalidates_get_project_by_name_cache(self): parent = super(LDAPIdentity, self) self.assertRaises( exception.Forbidden, parent.test_project_rename_invalidates_get_project_by_name_cache) def test_project_crud(self): # NOTE(topol): LDAP implementation does not currently support the # updating of a project name so this method override # provides a different update test project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project = self.resource_api.create_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertDictEqual(project, project_ref) project['description'] = uuid.uuid4().hex self.resource_api.update_project(project['id'], project) project_ref = self.resource_api.get_project(project['id']) self.assertDictEqual(project, project_ref) self.resource_api.delete_project(project['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project['id']) @unit.skip_if_cache_disabled('assignment') def test_cache_layer_project_crud(self): # NOTE(morganfainberg): LDAP implementation does not currently support # updating project names. This method override provides a different # update test. 
project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project_id = project['id'] # Create a project project = self.resource_api.create_project(project_id, project) self.resource_api.get_project(project_id) updated_project = copy.deepcopy(project) updated_project['description'] = uuid.uuid4().hex # Update project, bypassing resource manager self.resource_api.driver.update_project(project_id, updated_project) # Verify get_project still returns the original project_ref self.assertDictContainsSubset( project, self.resource_api.get_project(project_id)) # Invalidate cache self.resource_api.get_project.invalidate(self.resource_api, project_id) # Verify get_project now returns the new project self.assertDictContainsSubset( updated_project, self.resource_api.get_project(project_id)) # Update project using the resource_api manager back to original self.resource_api.update_project(project['id'], project) # Verify get_project returns the original project_ref self.assertDictContainsSubset( project, self.resource_api.get_project(project_id)) # Delete project bypassing resource_api self.resource_api.driver.delete_project(project_id) # Verify get_project still returns the project_ref self.assertDictContainsSubset( project, self.resource_api.get_project(project_id)) # Invalidate cache self.resource_api.get_project.invalidate(self.resource_api, project_id) # Verify ProjectNotFound now raised self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project_id) # recreate project self.resource_api.create_project(project_id, project) self.resource_api.get_project(project_id) # delete project self.resource_api.delete_project(project_id) # Verify ProjectNotFound is raised self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project_id) def test_update_is_domain_field(self): domain = self._get_domain_fixture() project = unit.new_project_ref(domain_id=domain['id']) project = self.resource_api.create_project(project['id'], 
project) # Try to update the is_domain field to True project['is_domain'] = True self.assertRaises(exception.ValidationError, self.resource_api.update_project, project['id'], project) def test_delete_is_domain_project(self): self.skipTest('Resource LDAP has been removed') def test_create_domain_under_regular_project_hierarchy_fails(self): self.skipTest('Resource LDAP has been removed') def test_create_not_is_domain_project_under_is_domain_hierarchy(self): self.skipTest('Resource LDAP has been removed') def test_create_project_passing_is_domain_flag_true(self): self.skipTest('Resource LDAP has been removed') def test_create_project_with_parent_id_and_without_domain_id(self): self.skipTest('Resource LDAP has been removed') def test_check_leaf_projects(self): self.skipTest('Resource LDAP has been removed') def test_list_projects_in_subtree(self): self.skipTest('Resource LDAP has been removed') def test_list_projects_in_subtree_with_circular_reference(self): self.skipTest('Resource LDAP has been removed') def test_list_project_parents(self): self.skipTest('Resource LDAP has been removed') def test_update_project_enabled_cascade(self): self.skipTest('Resource LDAP has been removed') def test_cannot_enable_cascade_with_parent_disabled(self): self.skipTest('Resource LDAP has been removed') def test_hierarchical_projects_crud(self): self.skipTest('Resource LDAP has been removed') def test_create_project_under_disabled_one(self): self.skipTest('Resource LDAP has been removed') def test_create_project_with_invalid_parent(self): self.skipTest('Resource LDAP has been removed') def test_create_leaf_project_with_invalid_domain(self): self.skipTest('Resource LDAP has been removed') def test_update_project_parent(self): self.skipTest('Resource LDAP has been removed') def test_enable_project_with_disabled_parent(self): self.skipTest('Resource LDAP has been removed') def test_disable_hierarchical_leaf_project(self): self.skipTest('Resource LDAP has been removed') def 
test_disable_hierarchical_not_leaf_project(self): self.skipTest('Resource LDAP has been removed') def test_delete_hierarchical_leaf_project(self): self.skipTest('Resource LDAP has been removed') def test_delete_hierarchical_not_leaf_project(self): self.skipTest('Resource LDAP has been removed') def test_check_hierarchy_depth(self): self.skipTest('Resource LDAP has been removed') def test_multi_role_grant_by_user_group_on_project_domain(self): # This is a partial implementation of the standard test that # is defined in unit.assignment.test_backends.py. It omits # both domain and group grants. since neither of these are # yet supported by the ldap backend. role_list = [] for _ in range(2): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) role_list.append(role) user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user1 = self.identity_api.create_user(user1) project1 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project1['id'], project1) self.assignment_api.add_role_to_user_and_project( user_id=user1['id'], tenant_id=project1['id'], role_id=role_list[0]['id']) self.assignment_api.add_role_to_user_and_project( user_id=user1['id'], tenant_id=project1['id'], role_id=role_list[1]['id']) # Although list_grants are not yet supported, we can test the # alternate way of getting back lists of grants, where user # and group roles are combined. 
Only directly assigned user # roles are available, since group grants are not yet supported combined_list = self.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id']) self.assertEqual(2, len(combined_list)) self.assertIn(role_list[0]['id'], combined_list) self.assertIn(role_list[1]['id'], combined_list) # Finally, although domain roles are not implemented, check we can # issue the combined get roles call with benign results, since thus is # used in token generation combined_role_list = self.assignment_api.get_roles_for_user_and_domain( user1['id'], CONF.identity.default_domain_id) self.assertEqual(0, len(combined_role_list)) def test_list_projects_for_alternate_domain(self): self.skipTest( 'N/A: LDAP does not support multiple domains') def test_get_default_domain_by_name(self): domain = self._get_domain_fixture() domain_ref = self.resource_api.get_domain_by_name(domain['name']) self.assertEqual(domain_ref, domain) def test_base_ldap_connection_deref_option(self): def get_conn(deref_name): self.config_fixture.config(group='ldap', alias_dereferencing=deref_name) base_ldap = common_ldap.BaseLdap(CONF) return base_ldap.get_connection() conn = get_conn('default') self.assertEqual(ldap.get_option(ldap.OPT_DEREF), conn.get_option(ldap.OPT_DEREF)) conn = get_conn('always') self.assertEqual(ldap.DEREF_ALWAYS, conn.get_option(ldap.OPT_DEREF)) conn = get_conn('finding') self.assertEqual(ldap.DEREF_FINDING, conn.get_option(ldap.OPT_DEREF)) conn = get_conn('never') self.assertEqual(ldap.DEREF_NEVER, conn.get_option(ldap.OPT_DEREF)) conn = get_conn('searching') self.assertEqual(ldap.DEREF_SEARCHING, conn.get_option(ldap.OPT_DEREF)) def test_list_users_no_dn(self): users = self.identity_api.list_users() self.assertEqual(len(default_fixtures.USERS), len(users)) user_ids = set(user['id'] for user in users) expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id'] for user in default_fixtures.USERS) for user_ref in users: self.assertNotIn('dn', 
user_ref) self.assertEqual(expected_user_ids, user_ids) def test_list_groups_no_dn(self): # Create some test groups. domain = self._get_domain_fixture() expected_group_ids = [] numgroups = 3 for _ in range(numgroups): group = unit.new_group_ref(domain_id=domain['id']) group = self.identity_api.create_group(group) expected_group_ids.append(group['id']) # Fetch the test groups and ensure that they don't contain a dn. groups = self.identity_api.list_groups() self.assertEqual(numgroups, len(groups)) group_ids = set(group['id'] for group in groups) for group_ref in groups: self.assertNotIn('dn', group_ref) self.assertEqual(set(expected_group_ids), group_ids) def test_list_groups_for_user_no_dn(self): # Create a test user. user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) # Create some test groups and add the test user as a member. domain = self._get_domain_fixture() expected_group_ids = [] numgroups = 3 for _ in range(numgroups): group = unit.new_group_ref(domain_id=domain['id']) group = self.identity_api.create_group(group) expected_group_ids.append(group['id']) self.identity_api.add_user_to_group(user['id'], group['id']) # Fetch the groups for the test user # and ensure they don't contain a dn. 
groups = self.identity_api.list_groups_for_user(user['id']) self.assertEqual(numgroups, len(groups)) group_ids = set(group['id'] for group in groups) for group_ref in groups: self.assertNotIn('dn', group_ref) self.assertEqual(set(expected_group_ids), group_ids) def test_user_id_attribute_in_create(self): driver = self.identity_api._select_identity_driver( CONF.identity.default_domain_id) driver.user.id_attr = 'mail' user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) user_ref = self.identity_api.get_user(user['id']) # 'email' attribute should've created because it is also being used # as user_id self.assertEqual(user_ref['id'], user_ref['email']) def test_user_id_attribute_map(self): driver = self.identity_api._select_identity_driver( CONF.identity.default_domain_id) driver.user.id_attr = 'mail' user_ref = self.identity_api.get_user(self.user_foo['email']) # the user_id_attribute map should be honored, which means # user_ref['id'] should contains the email attribute self.assertEqual(self.user_foo['email'], user_ref['id']) @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get') def test_get_id_from_dn_for_multivalued_attribute_id(self, mock_ldap_get): driver = self.identity_api._select_identity_driver( CONF.identity.default_domain_id) driver.user.id_attr = 'mail' # make 'email' multivalued so we can test the error condition email1 = uuid.uuid4().hex email2 = uuid.uuid4().hex mock_ldap_get.return_value = ( 'cn=nobodycares,dc=example,dc=com', { 'sn': [uuid.uuid4().hex], 'mail': [email1, email2], 'cn': 'nobodycares' } ) user_ref = self.identity_api.get_user(email1) # make sure we get the ID from DN (old behavior) if the ID attribute # has multiple values self.assertEqual('nobodycares', user_ref['id']) @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get') def test_id_attribute_not_found(self, mock_ldap_get): mock_ldap_get.return_value = ( 'cn=nobodycares,dc=example,dc=com', { 'sn': [uuid.uuid4().hex], } 
)
        user_api = identity.backends.ldap.UserApi(CONF)
        self.assertRaises(exception.NotFound,
                          user_api.get,
                          'nobodycares')

    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
    def test_user_id_not_in_dn(self, mock_ldap_get):
        # The ID attribute ('uid') does not appear in the DN; the driver must
        # still resolve both id and mapped name from the entry attributes.
        driver = self.identity_api._select_identity_driver(
            CONF.identity.default_domain_id)
        driver.user.id_attr = 'uid'
        driver.user.attribute_mapping['name'] = 'cn'

        mock_ldap_get.return_value = (
            'foo=bar,dc=example,dc=com',
            {
                'sn': [uuid.uuid4().hex],
                'foo': ['bar'],
                'cn': ['junk'],
                'uid': ['crap']
            }
        )
        user_ref = self.identity_api.get_user('crap')
        self.assertEqual('crap', user_ref['id'])
        self.assertEqual('junk', user_ref['name'])

    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
    def test_user_name_in_dn(self, mock_ldap_get):
        # AD-style setup: SAMAccountName is the ID, cn (also the DN's RDN)
        # is mapped to the user name.
        driver = self.identity_api._select_identity_driver(
            CONF.identity.default_domain_id)
        driver.user.id_attr = 'SAMAccountName'
        driver.user.attribute_mapping['name'] = 'cn'

        mock_ldap_get.return_value = (
            'cn=Foo Bar,dc=example,dc=com',
            {
                'sn': [uuid.uuid4().hex],
                'cn': ['Foo Bar'],
                'SAMAccountName': ['crap']
            }
        )
        user_ref = self.identity_api.get_user('crap')
        self.assertEqual('crap', user_ref['id'])
        self.assertEqual('Foo Bar', user_ref['name'])


class LDAPLimitTests(unit.TestCase, identity_tests.LimitTests):
    # Exercises list_limit behavior with the LDAP identity driver; the limit
    # is deliberately set below the number of fixture users in
    # config_overrides() so truncation is observable.

    def setUp(self):
        super(LDAPLimitTests, self).setUp()

        self.useFixture(ldapdb.LDAPDatabase())
        self.useFixture(database.Database(self.sql_driver_version_overrides))
        self.load_backends()
        self.load_fixtures(default_fixtures)
        identity_tests.LimitTests.setUp(self)
        _assert_backends(self,
                         assignment='sql',
                         identity='ldap',
                         resource='sql')

    def config_overrides(self):
        super(LDAPLimitTests, self).config_overrides()
        self.config_fixture.config(group='identity', driver='ldap')
        self.config_fixture.config(group='identity',
                                   list_limit=len(default_fixtures.USERS) - 1)

    def config_files(self):
        config_files = super(LDAPLimitTests, self).config_files()
        config_files.append(unit.dirs.tests_conf('backend_ldap.conf'))
        return config_files

    def test_list_projects_filtered_and_limited(self):
        self.skipTest("ldap for storing projects is deprecated")


class LDAPIdentityEnabledEmulation(LDAPIdentity):
    # Re-runs the LDAPIdentity test suite with user_enabled_emulation turned
    # on, overriding the tests whose expectations change under emulation.

    def setUp(self):
        super(LDAPIdentityEnabledEmulation, self).setUp()
        self.ldapdb.clear()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        for obj in [self.tenant_bar, self.tenant_baz, self.user_foo,
                    self.user_two, self.user_badguy]:
            obj.setdefault('enabled', True)
        _assert_backends(self, identity='ldap')

    def load_fixtures(self, fixtures):
        # Override super impl since we need to create the group container.
        create_group_container(self.identity_api)
        super(LDAPIdentity, self).load_fixtures(fixtures)

    def config_files(self):
        config_files = super(LDAPIdentityEnabledEmulation, self).config_files()
        config_files.append(unit.dirs.tests_conf('backend_ldap.conf'))
        return config_files

    def config_overrides(self):
        super(LDAPIdentityEnabledEmulation, self).config_overrides()
        self.config_fixture.config(group='ldap',
                                   user_enabled_emulation=True)

    def test_project_crud(self):
        # NOTE(topol): LDAPIdentityEnabledEmulation will create an
        #              enabled key in the project dictionary so this
        #              method override handles this side-effect
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id)
        project = self.resource_api.create_project(project['id'], project)
        project_ref = self.resource_api.get_project(project['id'])

        # self.resource_api.create_project adds an enabled
        # key with a value of True when LDAPIdentityEnabledEmulation
        # is used so we now add this expected key to the project dictionary
        project['enabled'] = True
        self.assertDictEqual(project, project_ref)

        project['description'] = uuid.uuid4().hex
        self.resource_api.update_project(project['id'], project)
        project_ref = self.resource_api.get_project(project['id'])
        self.assertDictEqual(project, project_ref)

        self.resource_api.delete_project(project['id'])
        self.assertRaises(exception.ProjectNotFound,
                          self.resource_api.get_project,
                          project['id'])

    @mock.patch.object(versionutils,
'report_deprecated_feature')
    def test_user_crud(self, mock_deprecator):
        # NOTE(stevemar): As of the Mitaka release, we now check for calls that
        # the LDAP write functionality has been deprecated.
        user_dict = self.new_user_ref(
            domain_id=CONF.identity.default_domain_id)
        user = self.identity_api.create_user(user_dict)
        args, _kwargs = mock_deprecator.call_args
        self.assertIn("create_user for the LDAP identity backend", args[1])
        # Password is write-only; it must never come back in a user ref.
        del user_dict['password']
        user_ref = self.identity_api.get_user(user['id'])
        user_ref_dict = {x: user_ref[x] for x in user_ref}
        self.assertDictContainsSubset(user_dict, user_ref_dict)

        user_dict['password'] = uuid.uuid4().hex
        self.identity_api.update_user(user['id'], user_dict)
        args, _kwargs = mock_deprecator.call_args
        self.assertIn("update_user for the LDAP identity backend", args[1])
        del user_dict['password']
        user_ref = self.identity_api.get_user(user['id'])
        user_ref_dict = {x: user_ref[x] for x in user_ref}
        self.assertDictContainsSubset(user_dict, user_ref_dict)

        self.identity_api.delete_user(user['id'])
        args, _kwargs = mock_deprecator.call_args
        self.assertIn("delete_user for the LDAP identity backend", args[1])
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          user['id'])

    def test_user_auth_emulated(self):
        # Authentication should succeed when an enabled-emulation DN is set.
        driver = self.identity_api._select_identity_driver(
            CONF.identity.default_domain_id)
        driver.user.enabled_emulation_dn = 'cn=test,dc=test'
        self.identity_api.authenticate(
            context={},
            user_id=self.user_foo['id'],
            password=self.user_foo['password'])

    def test_user_enable_attribute_mask(self):
        self.skipTest(
            "Enabled emulation conflicts with enabled mask")

    def test_user_enabled_use_group_config(self):
        # Emulate enabled state via membership in a groupOfUniqueNames group,
        # using the group config rather than the dedicated emulation options.
        self.config_fixture.config(
            group='ldap',
            user_enabled_emulation_use_group_config=True,
            group_member_attribute='uniqueMember',
            group_objectclass='groupOfUniqueNames')
        self.ldapdb.clear()
        self.load_backends()
        self.load_fixtures(default_fixtures)

        # Create a user and ensure they are enabled.
        user1 = unit.new_user_ref(enabled=True,
                                  domain_id=CONF.identity.default_domain_id)
        user_ref = self.identity_api.create_user(user1)
        self.assertIs(True, user_ref['enabled'])

        # Get a user and ensure they are enabled.
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(True, user_ref['enabled'])

    def test_user_enabled_invert(self):
        # With user_enabled_invert on, the enabled LDAP attribute must never
        # be written; enabled state is derived purely from emulation.
        self.config_fixture.config(group='ldap', user_enabled_invert=True,
                                   user_enabled_default=False)
        self.ldapdb.clear()
        self.load_backends()
        self.load_fixtures(default_fixtures)

        user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id)

        user2 = self.new_user_ref(enabled=False,
                                  domain_id=CONF.identity.default_domain_id)

        user3 = self.new_user_ref(domain_id=CONF.identity.default_domain_id)

        # Ensure that the enabled LDAP attribute is not set for a
        # newly created enabled user.
        user_ref = self.identity_api.create_user(user1)
        self.assertIs(True, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(True, user_ref['enabled'])

        # Ensure that an enabled LDAP attribute is not set for a disabled user.
        user1['enabled'] = False
        user_ref = self.identity_api.update_user(user_ref['id'], user1)
        self.assertIs(False, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))

        # Enable the user and ensure that the LDAP enabled
        # attribute is not set.
        user1['enabled'] = True
        user_ref = self.identity_api.update_user(user_ref['id'], user1)
        self.assertIs(True, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))

        # Ensure that the LDAP enabled attribute is not set for a
        # newly created disabled user.
user_ref = self.identity_api.create_user(user2)
        self.assertIs(False, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(False, user_ref['enabled'])

        # Ensure that the LDAP enabled attribute is not set for a newly created
        # user when the user_enabled_default setting is used.
        user_ref = self.identity_api.create_user(user3)
        self.assertIs(True, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(True, user_ref['enabled'])

    def test_user_enabled_invert_no_enabled_value(self):
        self.skipTest(
            "N/A: Covered by test_user_enabled_invert")

    def test_user_enabled_invert_default_str_value(self):
        self.skipTest(
            "N/A: Covered by test_user_enabled_invert")

    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
    def test_user_enabled_attribute_handles_utf8(self, mock_ldap_get):
        # Since user_enabled_emulation is enabled in this test, this test will
        # fail since it's using user_enabled_invert.
        self.config_fixture.config(group='ldap', user_enabled_invert=True,
                                   user_enabled_attribute='passwordisexpired')
        mock_ldap_get.return_value = (
            u'uid=123456789,c=us,ou=our_ldap,o=acme.com',
            {
                'uid': [123456789],
                'mail': [u'shaun@acme.com'],
                'passwordisexpired': [u'false'],
                'cn': [u'uid=123456789,c=us,ou=our_ldap,o=acme.com']
            }
        )

        user_api = identity.backends.ldap.UserApi(CONF)
        user_ref = user_api.get('123456789')
        self.assertIs(False, user_ref['enabled'])

    def test_escape_member_dn(self):
        # The enabled member DN is properly escaped when querying for an
        # enabled user.

        object_id = uuid.uuid4().hex
        driver = self.identity_api._select_identity_driver(
            CONF.identity.default_domain_id)

        # driver.user is the EnabledEmuMixIn implementation used for this test.
        mixin_impl = driver.user

        # ) is a special char in a filter and must be escaped.
        sample_dn = 'cn=foo)bar'
        # LDAP requires ) is escaped by being replaced with "\29"
        sample_dn_filter_esc = r'cn=foo\29bar'

        # Override the tree_dn, it's used to build the enabled member filter
        mixin_impl.tree_dn = sample_dn

        # The filter that _get_enabled is going to build contains the
        # tree_dn, which better be escaped in this case.
        exp_filter = '(%s=%s=%s,%s)' % (
            mixin_impl.member_attribute, mixin_impl.id_attr, object_id,
            sample_dn_filter_esc)

        with mixin_impl.get_connection() as conn:
            m = self.useFixture(mockpatch.PatchObject(conn, 'search_s')).mock
            mixin_impl._get_enabled(object_id, conn)
            # The 3rd argument is the DN.
            self.assertEqual(exp_filter, m.call_args[0][2])


class LDAPPosixGroupsTest(unit.TestCase):
    # Tests POSIX-style groups where members are stored as plain user IDs
    # (memberUID) rather than full DNs.

    def setUp(self):
        super(LDAPPosixGroupsTest, self).setUp()

        self.useFixture(ldapdb.LDAPDatabase())
        self.useFixture(database.Database())

        self.load_backends()
        self.load_fixtures(default_fixtures)

        _assert_backends(self, identity='ldap')

    def load_fixtures(self, fixtures):
        # Override super impl since we need to create the group container.
        create_group_container(self.identity_api)
        super(LDAPPosixGroupsTest, self).load_fixtures(fixtures)

    def config_overrides(self):
        super(LDAPPosixGroupsTest, self).config_overrides()
        self.config_fixture.config(group='identity', driver='ldap')
        self.config_fixture.config(group='ldap', group_members_are_ids=True,
                                   group_member_attribute='memberUID')

    def config_files(self):
        config_files = super(LDAPPosixGroupsTest, self).config_files()
        config_files.append(unit.dirs.tests_conf('backend_ldap.conf'))
        return config_files

    def _get_domain_fixture(self):
        """Domains in LDAP are read-only, so just return the static one."""
        return self.resource_api.get_domain(CONF.identity.default_domain_id)

    def test_posix_member_id(self):
        domain = self._get_domain_fixture()
        new_group = unit.new_group_ref(domain_id=domain['id'])
        new_group = self.identity_api.create_group(new_group)
        # Make sure we get an empty list back on a new group, not an error.
user_refs = self.identity_api.list_users_in_group(new_group['id'])
        self.assertEqual([], user_refs)

        # Make sure we get the correct users back once they have been added
        # to the group.
        new_user = unit.new_user_ref(domain_id=domain['id'])
        new_user = self.identity_api.create_user(new_user)

        # NOTE(amakarov): Create the group directly using LDAP operations
        # rather than going through the manager.
        group_api = self.identity_api.driver.group
        group_ref = group_api.get(new_group['id'])
        mod = (ldap.MOD_ADD, group_api.member_attribute, new_user['id'])
        conn = group_api.get_connection()
        conn.modify_s(group_ref['dn'], [mod])

        # Testing the case "the group contains a user"
        user_refs = self.identity_api.list_users_in_group(new_group['id'])
        self.assertIn(new_user['id'], (x['id'] for x in user_refs))

        # Testing the case "the user is a member of a group"
        group_refs = self.identity_api.list_groups_for_user(new_user['id'])
        self.assertIn(new_group['id'], (x['id'] for x in group_refs))


class LdapIdentityWithMapping(
        BaseLDAPIdentity, unit.SQLDriverOverrides, unit.TestCase):
    """Class to test mapping of default LDAP backend.

    The default configuration is not to enable mapping when using a single
    backend LDAP driver.  However, a cloud provider might want to enable
    the mapping, hence hiding the LDAP IDs from any clients of keystone.
    Setting backward_compatible_ids to False will enable this mapping.

    """

    def config_files(self):
        config_files = super(LdapIdentityWithMapping, self).config_files()
        config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf'))
        return config_files

    def setUp(self):
        sqldb = self.useFixture(database.Database())
        super(LdapIdentityWithMapping, self).setUp()
        self.ldapdb.clear()
        self.load_backends()
        cache.configure_cache()

        sqldb.recreate()
        self.load_fixtures(default_fixtures)
        # defaulted by the data load
        self.user_foo['enabled'] = True
        _assert_backends(self, identity='ldap')

    def config_overrides(self):
        super(LdapIdentityWithMapping, self).config_overrides()
        self.config_fixture.config(group='identity', driver='ldap')
        # backward_compatible_ids=False turns on public<->local ID mapping.
        self.config_fixture.config(group='identity_mapping',
                                   backward_compatible_ids=False)

    def test_dynamic_mapping_build(self):
        """Test to ensure entities not created via controller are mapped.

        Many LDAP backends will, essentially, be Read Only. In these cases
        the mapping is not built by creating objects, rather from
        enumerating the entries.  We test this here by manually deleting the
        mapping and then trying to re-read the entries.

""" initial_mappings = len(mapping_sql.list_id_mappings()) user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user1 = self.identity_api.create_user(user1) user2 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user2 = self.identity_api.create_user(user2) mappings = mapping_sql.list_id_mappings() self.assertEqual(initial_mappings + 2, len(mappings)) # Now delete the mappings for the two users above self.id_mapping_api.purge_mappings({'public_id': user1['id']}) self.id_mapping_api.purge_mappings({'public_id': user2['id']}) # We should no longer be able to get these users via their old IDs self.assertRaises(exception.UserNotFound, self.identity_api.get_user, user1['id']) self.assertRaises(exception.UserNotFound, self.identity_api.get_user, user2['id']) # Now enumerate all users...this should re-build the mapping, and # we should be able to find the users via their original public IDs. self.identity_api.list_users() self.identity_api.get_user(user1['id']) self.identity_api.get_user(user2['id']) def test_get_roles_for_user_and_project_user_group_same_id(self): self.skipTest('N/A: We never generate the same ID for a user and ' 'group in our mapping table') def test_list_domains(self): domains = self.resource_api.list_domains() self.assertEqual([resource.calc_default_domain()], domains) class BaseMultiLDAPandSQLIdentity(object): """Mixin class with support methods for domain-specific config testing.""" def create_users_across_domains(self): """Create a set of users, each with a role on their own domain.""" # We also will check that the right number of id mappings get created initial_mappings = len(mapping_sql.list_id_mappings()) self.users['user0'] = unit.create_user( self.identity_api, self.domains['domain_default']['id']) self.assignment_api.create_grant( user_id=self.users['user0']['id'], domain_id=self.domains['domain_default']['id'], role_id=self.role_member['id']) for x in range(1, self.domain_count): self.users['user%s' % x] = 
unit.create_user( self.identity_api, self.domains['domain%s' % x]['id']) self.assignment_api.create_grant( user_id=self.users['user%s' % x]['id'], domain_id=self.domains['domain%s' % x]['id'], role_id=self.role_member['id']) # So how many new id mappings should have been created? One for each # user created in a domain that is using the non default driver.. self.assertEqual(initial_mappings + self.domain_specific_count, len(mapping_sql.list_id_mappings())) def check_user(self, user, domain_id, expected_status): """Check user is in correct backend. As part of the tests, we want to force ourselves to manually select the driver for a given domain, to make sure the entity ended up in the correct backend. """ driver = self.identity_api._select_identity_driver(domain_id) unused, unused, entity_id = ( self.identity_api._get_domain_driver_and_entity_id( user['id'])) if expected_status == http_client.OK: ref = driver.get_user(entity_id) ref = self.identity_api._set_domain_id_and_mapping( ref, domain_id, driver, map.EntityType.USER) user = user.copy() del user['password'] self.assertDictEqual(user, ref) else: # TODO(henry-nash): Use AssertRaises here, although # there appears to be an issue with using driver.get_user # inside that construct try: driver.get_user(entity_id) except expected_status: pass def setup_initial_domains(self): def create_domain(domain): try: ref = self.resource_api.create_domain( domain['id'], domain) except exception.Conflict: ref = ( self.resource_api.get_domain_by_name(domain['name'])) return ref self.domains = {} for x in range(1, self.domain_count): domain = 'domain%s' % x self.domains[domain] = create_domain( {'id': uuid.uuid4().hex, 'name': domain}) self.domains['domain_default'] = create_domain( resource.calc_default_domain()) def test_authenticate_to_each_domain(self): """Test that a user in each domain can authenticate.""" for user_num in range(self.domain_count): user = 'user%s' % user_num self.identity_api.authenticate( context={}, 
user_id=self.users[user]['id'],
                password=self.users[user]['password'])


class MultiLDAPandSQLIdentity(BaseLDAPIdentity, unit.SQLDriverOverrides,
                              unit.TestCase, BaseMultiLDAPandSQLIdentity):
    """Class to test common SQL plus individual LDAP backends.

    We define a set of domains and domain-specific backends:

    - A separate LDAP backend for the default domain
    - A separate LDAP backend for domain1
    - domain2 shares the same LDAP as domain1, but uses a different
      tree attach point
    - An SQL backend for all other domains (which will include domain3
      and domain4)

    Normally one would expect that the default domain would be handled as
    part of the "other domains" - however the above provides better
    test coverage since most of the existing backend tests use the default
    domain.

    """

    def setUp(self):
        sqldb = self.useFixture(database.Database())
        super(MultiLDAPandSQLIdentity, self).setUp()

        self.load_backends()
        sqldb.recreate()

        self.domain_count = 5
        self.domain_specific_count = 3
        self.setup_initial_domains()
        self._setup_initial_users()

        # All initial test data setup complete, time to switch on support
        # for separate backends per domain.
        self.enable_multi_domain()

        self.ldapdb.clear()
        self.load_fixtures(default_fixtures)
        self.create_users_across_domains()
        self.assert_backends()

    def assert_backends(self):
        # Default/domain1/domain2 are LDAP-backed; everything else is SQL.
        _assert_backends(self,
                         assignment='sql',
                         identity={
                             None: 'sql',
                             self.domains['domain_default']['id']: 'ldap',
                             self.domains['domain1']['id']: 'ldap',
                             self.domains['domain2']['id']: 'ldap',
                         },
                         resource='sql')

    def config_overrides(self):
        super(MultiLDAPandSQLIdentity, self).config_overrides()
        # Make sure identity and assignment are actually SQL drivers,
        # BaseLDAPIdentity sets these options to use LDAP.
        self.config_fixture.config(group='identity', driver='sql')
        self.config_fixture.config(group='resource', driver='sql')
        self.config_fixture.config(group='assignment', driver='sql')

    def _setup_initial_users(self):
        # Create some identity entities BEFORE we switch to multi-backend, so
        # we can test that these are still accessible
        self.users = {}
        self.users['userA'] = unit.create_user(
            self.identity_api,
            self.domains['domain_default']['id'])
        self.users['userB'] = unit.create_user(
            self.identity_api,
            self.domains['domain1']['id'])
        self.users['userC'] = unit.create_user(
            self.identity_api,
            self.domains['domain3']['id'])

    def enable_multi_domain(self):
        """Enable the chosen form of multi domain configuration support.

        This method enables the file-based configuration support. Child
        classes that wish to use the database domain configuration support
        should override this method and set the appropriate config_fixture
        option.

        """
        self.config_fixture.config(
            group='identity', domain_specific_drivers_enabled=True,
            domain_config_dir=unit.TESTCONF + '/domain_configs_multi_ldap',
            list_limit=1000)
        self.config_fixture.config(group='identity_mapping',
                                   backward_compatible_ids=False)

    def get_config(self, domain_id):
        # Get the config for this domain; will return CONF
        # if no specific config is defined for this domain.
        return self.identity_api.domain_configs.get_domain_conf(domain_id)

    def test_list_users(self):
        # Override the standard list users, since we have added an extra user
        # to the default domain, so the number of expected users is one more
        # than in the standard test.
users = self.identity_api.list_users(
            domain_scope=self._set_domain_scope(
                CONF.identity.default_domain_id))
        self.assertEqual(len(default_fixtures.USERS) + 1, len(users))
        user_ids = set(user['id'] for user in users)
        expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
                                for user in default_fixtures.USERS)
        expected_user_ids.add(self.users['user0']['id'])
        for user_ref in users:
            self.assertNotIn('password', user_ref)
        self.assertEqual(expected_user_ids, user_ids)

    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get_all')
    def test_list_limit_domain_specific_inheritance(self, ldap_get_all):
        # Passing hints is important, because if it's not passed, limiting
        # is considered to be disabled.
        hints = driver_hints.Hints()
        self.identity_api.list_users(
            domain_scope=self.domains['domain2']['id'],
            hints=hints)
        # Since list_limit is not specified in keystone.domain2.conf, it
        # should take the default, which is 1000.
        self.assertTrue(ldap_get_all.called)
        args, kwargs = ldap_get_all.call_args
        hints = args[0]
        self.assertEqual(1000, hints.limit['limit'])

    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get_all')
    def test_list_limit_domain_specific_override(self, ldap_get_all):
        # Passing hints is important, because if it's not passed, limiting
        # is considered to be disabled.
        hints = driver_hints.Hints()
        self.identity_api.list_users(
            domain_scope=self.domains['domain1']['id'],
            hints=hints)
        # This should have the list_limit set in keystone.domain1.conf, which
        # is 101.
        self.assertTrue(ldap_get_all.called)
        args, kwargs = ldap_get_all.call_args
        hints = args[0]
        self.assertEqual(101, hints.limit['limit'])

    def test_domain_segregation(self):
        """Test that separate configs have segregated the domain.

        Test Plan:

        - Users were created in each domain as part of setup, now make sure
          you can only find a given user in its relevant domain/backend
        - Make sure that for a backend that supports multiple domains
          you can get the users via any of its domains

        """
        # Check that I can read a user with the appropriate domain-selected
        # driver, but won't find it via any other domain driver
        check_user = self.check_user
        check_user(self.users['user0'],
                   self.domains['domain_default']['id'], http_client.OK)
        for domain in [self.domains['domain1']['id'],
                       self.domains['domain2']['id'],
                       self.domains['domain3']['id'],
                       self.domains['domain4']['id']]:
            check_user(self.users['user0'], domain, exception.UserNotFound)

        check_user(self.users['user1'], self.domains['domain1']['id'],
                   http_client.OK)
        for domain in [self.domains['domain_default']['id'],
                       self.domains['domain2']['id'],
                       self.domains['domain3']['id'],
                       self.domains['domain4']['id']]:
            check_user(self.users['user1'], domain, exception.UserNotFound)

        check_user(self.users['user2'], self.domains['domain2']['id'],
                   http_client.OK)
        for domain in [self.domains['domain_default']['id'],
                       self.domains['domain1']['id'],
                       self.domains['domain3']['id'],
                       self.domains['domain4']['id']]:
            check_user(self.users['user2'], domain, exception.UserNotFound)

        # domain3 and domain4 share the same backend, so you should be
        # able to see user3 and user4 from either.
check_user(self.users['user3'], self.domains['domain3']['id'],
                   http_client.OK)
        check_user(self.users['user3'], self.domains['domain4']['id'],
                   http_client.OK)
        check_user(self.users['user4'], self.domains['domain3']['id'],
                   http_client.OK)
        check_user(self.users['user4'], self.domains['domain4']['id'],
                   http_client.OK)

        for domain in [self.domains['domain_default']['id'],
                       self.domains['domain1']['id'],
                       self.domains['domain2']['id']]:
            check_user(self.users['user3'], domain, exception.UserNotFound)
            check_user(self.users['user4'], domain, exception.UserNotFound)

        # Finally, going through the regular manager layer, make sure we
        # only see the right number of users in each of the non-default
        # domains.  One might have expected two users in domain1 (since we
        # created one before we switched to multi-backend), however since
        # that domain changed backends in the switch we don't find it anymore.
        # This is as designed - we don't support moving domains between
        # backends.
        #
        # The listing of the default domain is already handled in the
        # test_list_users() method.
        for domain in [self.domains['domain1']['id'],
                       self.domains['domain2']['id'],
                       self.domains['domain4']['id']]:
            self.assertThat(
                self.identity_api.list_users(domain_scope=domain),
                matchers.HasLength(1))

        # domain3 had a user created before we switched on
        # multiple backends, plus one created afterwards - and its
        # backend has not changed - so we should find two.
        self.assertThat(
            self.identity_api.list_users(
                domain_scope=self.domains['domain3']['id']),
            matchers.HasLength(2))

    def test_existing_uuids_work(self):
        """Test that 'uni-domain' created IDs still work.

        Throwing the switch to domain-specific backends should not
        cause existing identities to be inaccessible via ID.

        """
        self.identity_api.get_user(self.users['userA']['id'])
        self.identity_api.get_user(self.users['userB']['id'])
        self.identity_api.get_user(self.users['userC']['id'])

    def test_scanning_of_config_dir(self):
        """Test the Manager class scans the config directory.

        The setup for the main tests above load the domain configs directly
        so that the test overrides can be included. This test just makes sure
        that the standard config directory scanning does pick up the relevant
        domain config files.

        """
        # Confirm that config has drivers_enabled as True, which we will
        # check has been set to False later in this test
        self.assertTrue(CONF.identity.domain_specific_drivers_enabled)
        self.load_backends()
        # Execute any command to trigger the lazy loading of domain configs
        self.identity_api.list_users(
            domain_scope=self.domains['domain1']['id'])
        # ...and now check the domain configs have been set up
        self.assertIn('default', self.identity_api.domain_configs)
        self.assertIn(self.domains['domain1']['id'],
                      self.identity_api.domain_configs)
        self.assertIn(self.domains['domain2']['id'],
                      self.identity_api.domain_configs)
        self.assertNotIn(self.domains['domain3']['id'],
                         self.identity_api.domain_configs)
        self.assertNotIn(self.domains['domain4']['id'],
                         self.identity_api.domain_configs)

        # Finally check that a domain specific config contains items from both
        # the primary config and the domain specific config
        conf = self.identity_api.domain_configs.get_domain_conf(
            self.domains['domain1']['id'])
        # This should now be false, as is the default, since this is not
        # set in the standard primary config file
        self.assertFalse(conf.identity.domain_specific_drivers_enabled)
        # ..and make sure a domain-specific option is also set
        self.assertEqual('fake://memory1', conf.ldap.url)

    def test_delete_domain_with_user_added(self):
        # A domain with a project and a (deleted) grant must still be
        # deletable once disabled.
        domain = unit.new_domain_ref()
        project = unit.new_project_ref(domain_id=domain['id'])
        self.resource_api.create_domain(domain['id'], domain)
        project = self.resource_api.create_project(project['id'], project)
        project_ref = self.resource_api.get_project(project['id'])
        self.assertDictEqual(project, project_ref)

        self.assignment_api.create_grant(user_id=self.user_foo['id'],
                                         project_id=project['id'],
                                         role_id=self.role_member['id'])
self.assignment_api.delete_grant(user_id=self.user_foo['id'],
                                         project_id=project['id'],
                                         role_id=self.role_member['id'])
        # A domain must be disabled before it can be deleted.
        domain['enabled'] = False
        self.resource_api.update_domain(domain['id'], domain)
        self.resource_api.delete_domain(domain['id'])
        self.assertRaises(exception.DomainNotFound,
                          self.resource_api.get_domain,
                          domain['id'])

    def test_user_enabled_ignored_disable_error(self):
        # Override.
        self.skipTest("Doesn't apply since LDAP config has no affect on the "
                      "SQL identity backend.")

    def test_group_enabled_ignored_disable_error(self):
        # Override.
        self.skipTest("Doesn't apply since LDAP config has no affect on the "
                      "SQL identity backend.")

    def test_project_enabled_ignored_disable_error(self):
        # Override
        self.skipTest("Doesn't apply since LDAP configuration is ignored for "
                      "SQL assignment backend.")

    def test_list_role_assignments_filtered_by_role(self):
        # Domain roles are supported by the SQL Assignment backend
        base = super(BaseLDAPIdentity, self)
        base.test_list_role_assignments_filtered_by_role()

    def test_list_role_assignment_by_domain(self):
        # With multi LDAP this method should work, so override the override
        # from BaseLDAPIdentity
        super(BaseLDAPIdentity, self).test_list_role_assignment_by_domain()

    def test_list_role_assignment_by_user_with_domain_group_roles(self):
        # With multi LDAP this method should work, so override the override
        # from BaseLDAPIdentity
        super(BaseLDAPIdentity, self).\
            test_list_role_assignment_by_user_with_domain_group_roles()

    def test_list_role_assignment_using_sourced_groups_with_domains(self):
        # With SQL Assignment this method should work, so override the
        # override from BaseLDAPIdentity
        base = super(BaseLDAPIdentity, self)
        base.test_list_role_assignment_using_sourced_groups_with_domains()

    def test_create_project_with_domain_id_and_without_parent_id(self):
        # With multi LDAP this method should work, so override the override
        # from BaseLDAPIdentity
        super(BaseLDAPIdentity, self).\
            test_create_project_with_domain_id_and_without_parent_id()

    def test_create_project_with_domain_id_mismatch_to_parent_domain(self):
        # With multi LDAP this method should work, so override the override
        # from BaseLDAPIdentity
        super(BaseLDAPIdentity, self).\
            test_create_project_with_domain_id_mismatch_to_parent_domain()

    def test_remove_foreign_assignments_when_deleting_a_domain(self):
        # With multi LDAP this method should work, so override the override
        # from BaseLDAPIdentity
        base = super(BaseLDAPIdentity, self)
        base.test_remove_foreign_assignments_when_deleting_a_domain()


class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
    """Class to test the use of domain configs stored in the database.

    Repeat the same tests as MultiLDAPandSQLIdentity, but instead of using the
    domain specific config files, store the domain specific values in the
    database.

    """

    def assert_backends(self):
        _assert_backends(self,
                         assignment='sql',
                         identity={
                             None: 'sql',
                             self.domains['domain_default']['id']: 'ldap',
                             self.domains['domain1']['id']: 'ldap',
                             self.domains['domain2']['id']: 'ldap',
                         },
                         resource='sql')

    def enable_multi_domain(self):
        # The values below are the same as in the domain_configs_multi_ldap
        # directory of test config_files.
default_config = { 'ldap': {'url': 'fake://memory', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=example,cn=com'}, 'identity': {'driver': 'ldap'} } domain1_config = { 'ldap': {'url': 'fake://memory1', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=example,cn=com'}, 'identity': {'driver': 'ldap', 'list_limit': '101'} } domain2_config = { 'ldap': {'url': 'fake://memory', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=myroot,cn=com', 'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org', 'user_tree_dn': 'ou=Users,dc=myroot,dc=org'}, 'identity': {'driver': 'ldap'} } self.domain_config_api.create_config(CONF.identity.default_domain_id, default_config) self.domain_config_api.create_config(self.domains['domain1']['id'], domain1_config) self.domain_config_api.create_config(self.domains['domain2']['id'], domain2_config) self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True, domain_configurations_from_database=True, list_limit=1000) self.config_fixture.config(group='identity_mapping', backward_compatible_ids=False) def test_domain_config_has_no_impact_if_database_support_disabled(self): """Ensure database domain configs have no effect if disabled. Set reading from database configs to false, restart the backends and then try and set and use database configs. """ self.config_fixture.config( group='identity', domain_configurations_from_database=False) self.load_backends() new_config = {'ldap': {'url': uuid.uuid4().hex}} self.domain_config_api.create_config( CONF.identity.default_domain_id, new_config) # Trigger the identity backend to initialise any domain specific # configurations self.identity_api.list_users() # Check that the new config has not been passed to the driver for # the default domain. 
default_config = ( self.identity_api.domain_configs.get_domain_conf( CONF.identity.default_domain_id)) self.assertEqual(CONF.ldap.url, default_config.ldap.url) def test_reloading_domain_config(self): """Ensure domain drivers are reloaded on a config modification.""" domain_cfgs = self.identity_api.domain_configs # Create a new config for the default domain, hence overwriting the # current settings. new_config = { 'ldap': {'url': uuid.uuid4().hex}, 'identity': {'driver': 'ldap'}} self.domain_config_api.create_config( CONF.identity.default_domain_id, new_config) default_config = ( domain_cfgs.get_domain_conf(CONF.identity.default_domain_id)) self.assertEqual(new_config['ldap']['url'], default_config.ldap.url) # Ensure updating is also honored updated_config = {'url': uuid.uuid4().hex} self.domain_config_api.update_config( CONF.identity.default_domain_id, updated_config, group='ldap', option='url') default_config = ( domain_cfgs.get_domain_conf(CONF.identity.default_domain_id)) self.assertEqual(updated_config['url'], default_config.ldap.url) # ...and finally ensure delete causes the driver to get the standard # config again. 
self.domain_config_api.delete_config(CONF.identity.default_domain_id) default_config = ( domain_cfgs.get_domain_conf(CONF.identity.default_domain_id)) self.assertEqual(CONF.ldap.url, default_config.ldap.url) def test_setting_multiple_sql_driver_raises_exception(self): """Ensure setting multiple domain specific sql drivers is prevented.""" new_config = {'identity': {'driver': 'sql'}} self.domain_config_api.create_config( CONF.identity.default_domain_id, new_config) self.identity_api.domain_configs.get_domain_conf( CONF.identity.default_domain_id) self.domain_config_api.create_config(self.domains['domain1']['id'], new_config) self.assertRaises(exception.MultipleSQLDriversInConfig, self.identity_api.domain_configs.get_domain_conf, self.domains['domain1']['id']) def test_same_domain_gets_sql_driver(self): """Ensure we can set an SQL driver if we have had it before.""" new_config = {'identity': {'driver': 'sql'}} self.domain_config_api.create_config( CONF.identity.default_domain_id, new_config) self.identity_api.domain_configs.get_domain_conf( CONF.identity.default_domain_id) # By using a slightly different config, we cause the driver to be # reloaded...and hence check if we can reuse the sql driver new_config = {'identity': {'driver': 'sql'}, 'ldap': {'url': 'fake://memory1'}} self.domain_config_api.create_config( CONF.identity.default_domain_id, new_config) self.identity_api.domain_configs.get_domain_conf( CONF.identity.default_domain_id) def test_delete_domain_clears_sql_registration(self): """Ensure registration is deleted when a domain is deleted.""" domain = unit.new_domain_ref() domain = self.resource_api.create_domain(domain['id'], domain) new_config = {'identity': {'driver': 'sql'}} self.domain_config_api.create_config(domain['id'], new_config) self.identity_api.domain_configs.get_domain_conf(domain['id']) # First show that trying to set SQL for another driver fails self.domain_config_api.create_config(self.domains['domain1']['id'], new_config) 
self.assertRaises(exception.MultipleSQLDriversInConfig, self.identity_api.domain_configs.get_domain_conf, self.domains['domain1']['id']) self.domain_config_api.delete_config(self.domains['domain1']['id']) # Now we delete the domain domain['enabled'] = False self.resource_api.update_domain(domain['id'], domain) self.resource_api.delete_domain(domain['id']) # The registration should now be available self.domain_config_api.create_config(self.domains['domain1']['id'], new_config) self.identity_api.domain_configs.get_domain_conf( self.domains['domain1']['id']) def test_orphaned_registration_does_not_prevent_getting_sql_driver(self): """Ensure we self heal an orphaned sql registration.""" domain = unit.new_domain_ref() domain = self.resource_api.create_domain(domain['id'], domain) new_config = {'identity': {'driver': 'sql'}} self.domain_config_api.create_config(domain['id'], new_config) self.identity_api.domain_configs.get_domain_conf(domain['id']) # First show that trying to set SQL for another driver fails self.domain_config_api.create_config(self.domains['domain1']['id'], new_config) self.assertRaises(exception.MultipleSQLDriversInConfig, self.identity_api.domain_configs.get_domain_conf, self.domains['domain1']['id']) # Now we delete the domain by using the backend driver directly, # which causes the domain to be deleted without any of the cleanup # that is in the manager (this is simulating a server process crashing # in the middle of a delete domain operation, and somehow leaving the # domain config settings in place, but the domain is deleted). We # should still be able to set another domain to SQL, since we should # self heal this issue. 
self.resource_api.driver.delete_project(domain['id']) # Invalidate cache (so we will see the domain has gone) self.resource_api.get_domain.invalidate( self.resource_api, domain['id']) # The registration should now be available self.domain_config_api.create_config(self.domains['domain1']['id'], new_config) self.identity_api.domain_configs.get_domain_conf( self.domains['domain1']['id']) class DomainSpecificLDAPandSQLIdentity( BaseLDAPIdentity, unit.SQLDriverOverrides, unit.TestCase, BaseMultiLDAPandSQLIdentity): """Class to test when all domains use specific configs, including SQL. We define a set of domains and domain-specific backends: - A separate LDAP backend for the default domain - A separate SQL backend for domain1 Although the default driver still exists, we don't use it. """ def setUp(self): sqldb = self.useFixture(database.Database()) super(DomainSpecificLDAPandSQLIdentity, self).setUp() self.initial_setup(sqldb) def initial_setup(self, sqldb): # We aren't setting up any initial data ahead of switching to # domain-specific operation, so make the switch straight away. self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True, domain_config_dir=( unit.TESTCONF + '/domain_configs_one_sql_one_ldap')) self.config_fixture.config(group='identity_mapping', backward_compatible_ids=False) self.load_backends() sqldb.recreate() self.domain_count = 2 self.domain_specific_count = 2 self.setup_initial_domains() self.users = {} self.ldapdb.clear() self.load_fixtures(default_fixtures) self.create_users_across_domains() _assert_backends( self, assignment='sql', identity={ None: 'ldap', 'default': 'ldap', self.domains['domain1']['id']: 'sql', }, resource='sql') def config_overrides(self): super(DomainSpecificLDAPandSQLIdentity, self).config_overrides() # Make sure resource & assignment are actually SQL drivers, # BaseLDAPIdentity causes this option to use LDAP. 
self.config_fixture.config(group='resource', driver='sql') self.config_fixture.config(group='assignment', driver='sql') def get_config(self, domain_id): # Get the config for this domain, will return CONF # if no specific config defined for this domain return self.identity_api.domain_configs.get_domain_conf(domain_id) def test_list_domains(self): self.skipTest( 'N/A: Not relevant for multi ldap testing') def test_list_domains_non_default_domain_id(self): self.skipTest( 'N/A: Not relevant for multi ldap testing') def test_domain_crud(self): self.skipTest( 'N/A: Not relevant for multi ldap testing') def test_not_delete_domain_with_enabled_subdomains(self): self.skipTest( 'N/A: Not relevant for multi ldap testing') def test_delete_domain(self): # With this restricted multi LDAP class, tests that use multiple # domains and identity, are still not supported self.assertRaises( exception.DomainNotFound, super(BaseLDAPIdentity, self).test_delete_domain_with_project_api) def test_list_users(self): # Override the standard list users, since we have added an extra user # to the default domain, so the number of expected users is one more # than in the standard test. users = self.identity_api.list_users( domain_scope=self._set_domain_scope( CONF.identity.default_domain_id)) self.assertEqual(len(default_fixtures.USERS) + 1, len(users)) user_ids = set(user['id'] for user in users) expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id'] for user in default_fixtures.USERS) expected_user_ids.add(self.users['user0']['id']) for user_ref in users: self.assertNotIn('password', user_ref) self.assertEqual(expected_user_ids, user_ids) def test_domain_segregation(self): """Test that separate configs have segregated the domain. 
Test Plan: - Users were created in each domain as part of setup, now make sure you can only find a given user in its relevant domain/backend - Make sure that for a backend that supports multiple domains you can get the users via any of its domains """ # Check that I can read a user with the appropriate domain-selected # driver, but won't find it via any other domain driver self.check_user(self.users['user0'], self.domains['domain_default']['id'], http_client.OK) self.check_user(self.users['user0'], self.domains['domain1']['id'], exception.UserNotFound) self.check_user(self.users['user1'], self.domains['domain1']['id'], http_client.OK) self.check_user(self.users['user1'], self.domains['domain_default']['id'], exception.UserNotFound) # Finally, going through the regular manager layer, make sure we # only see the right number of users in the non-default domain. self.assertThat( self.identity_api.list_users( domain_scope=self.domains['domain1']['id']), matchers.HasLength(1)) def test_add_role_grant_to_user_and_project_returns_not_found(self): self.skipTest('Blocked by bug 1101287') def test_get_role_grants_for_user_and_project_returns_not_found(self): self.skipTest('Blocked by bug 1101287') def test_list_projects_for_user_with_grants(self): self.skipTest('Blocked by bug 1221805') def test_get_roles_for_user_and_project_user_group_same_id(self): self.skipTest('N/A: We never generate the same ID for a user and ' 'group in our mapping table') def test_user_id_comma(self): self.skipTest('Only valid if it is guaranteed to be talking to ' 'the fakeldap backend') def test_user_id_comma_grants(self): self.skipTest('Only valid if it is guaranteed to be talking to ' 'the fakeldap backend') def test_user_enabled_ignored_disable_error(self): # Override. self.skipTest("Doesn't apply since LDAP config has no affect on the " "SQL identity backend.") def test_group_enabled_ignored_disable_error(self): # Override. 
self.skipTest("Doesn't apply since LDAP config has no affect on the " "SQL identity backend.") def test_project_enabled_ignored_disable_error(self): # Override self.skipTest("Doesn't apply since LDAP configuration is ignored for " "SQL assignment backend.") def test_list_role_assignments_filtered_by_role(self): # Domain roles are supported by the SQL Assignment backend base = super(BaseLDAPIdentity, self) base.test_list_role_assignments_filtered_by_role() def test_delete_domain_with_project_api(self): # With this restricted multi LDAP class, tests that use multiple # domains and identity, are still not supported self.assertRaises( exception.DomainNotFound, super(BaseLDAPIdentity, self).test_delete_domain_with_project_api) def test_create_project_with_domain_id_and_without_parent_id(self): # With restricted multi LDAP, tests that don't use identity, but do # required aditional domains will work base = super(BaseLDAPIdentity, self) base.test_create_project_with_domain_id_and_without_parent_id() def test_create_project_with_domain_id_mismatch_to_parent_domain(self): # With restricted multi LDAP, tests that don't use identity, but do # required aditional domains will work base = super(BaseLDAPIdentity, self) base.test_create_project_with_domain_id_mismatch_to_parent_domain() class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity): """Class to test simplest use of domain-specific SQL driver. The simplest use of an SQL domain-specific backend is when it is used to augment the standard case when LDAP is the default driver defined in the main config file. This would allow, for example, service users to be stored in SQL while LDAP handles the rest. Hence we define: - The default driver uses the LDAP backend for the default domain - A separate SQL backend for domain1 """ def initial_setup(self, sqldb): # We aren't setting up any initial data ahead of switching to # domain-specific operation, so make the switch straight away. 
self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True, domain_config_dir=( unit.TESTCONF + '/domain_configs_default_ldap_one_sql')) # Part of the testing counts how many new mappings get created as # we create users, so ensure we are NOT using mapping for the default # LDAP domain so this doesn't confuse the calculation. self.config_fixture.config(group='identity_mapping', backward_compatible_ids=True) self.load_backends() sqldb.recreate() self.domain_count = 2 self.domain_specific_count = 1 self.setup_initial_domains() self.users = {} self.load_fixtures(default_fixtures) self.create_users_across_domains() _assert_backends(self, assignment='sql', identity='ldap', resource='sql') def config_overrides(self): super(DomainSpecificSQLIdentity, self).config_overrides() self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config(group='resource', driver='sql') self.config_fixture.config(group='assignment', driver='sql') def get_config(self, domain_id): if domain_id == CONF.identity.default_domain_id: return CONF else: return self.identity_api.domain_configs.get_domain_conf(domain_id) def test_default_sql_plus_sql_specific_driver_fails(self): # First confirm that if ldap is default driver, domain1 can be # loaded as sql self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config(group='assignment', driver='sql') self.load_backends() # Make any identity call to initiate the lazy loading of configs self.identity_api.list_users( domain_scope=CONF.identity.default_domain_id) self.assertIsNotNone(self.get_config(self.domains['domain1']['id'])) # Now re-initialize, but with sql as the identity driver self.config_fixture.config(group='identity', driver='sql') self.config_fixture.config(group='assignment', driver='sql') self.load_backends() # Make any identity call to initiate the lazy loading of configs, which # should fail since we would now have two sql drivers. 
self.assertRaises(exception.MultipleSQLDriversInConfig, self.identity_api.list_users, domain_scope=CONF.identity.default_domain_id) def test_multiple_sql_specific_drivers_fails(self): self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config(group='assignment', driver='sql') self.load_backends() # Ensure default, domain1 and domain2 exist self.domain_count = 3 self.setup_initial_domains() # Make any identity call to initiate the lazy loading of configs self.identity_api.list_users( domain_scope=CONF.identity.default_domain_id) # This will only load domain1, since the domain2 config file is # not stored in the same location self.assertIsNotNone(self.get_config(self.domains['domain1']['id'])) # Now try and manually load a 2nd sql specific driver, for domain2, # which should fail. self.assertRaises( exception.MultipleSQLDriversInConfig, self.identity_api.domain_configs._load_config_from_file, self.resource_api, [unit.TESTCONF + '/domain_configs_one_extra_sql/' + 'keystone.domain2.conf'], 'domain2') class LdapFilterTests(identity_tests.FilterTests, unit.TestCase): def setUp(self): super(LdapFilterTests, self).setUp() sqldb = self.useFixture(database.Database()) self.useFixture(ldapdb.LDAPDatabase()) self.load_backends() self.load_fixtures(default_fixtures) sqldb.recreate() _assert_backends(self, identity='ldap') def config_overrides(self): super(LdapFilterTests, self).config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = super(LdapFilterTests, self).config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files @wip('Not supported by LDAP identity driver') def test_list_users_in_group_inexact_filtered(self): # The LDAP identity driver currently does not support filtering on the # listing users for a given group, so will fail this test. 
super(LdapFilterTests, self).test_list_users_in_group_inexact_filtered() @wip('Not supported by LDAP identity driver') def test_list_users_in_group_exact_filtered(self): # The LDAP identity driver currently does not support filtering on the # listing users for a given group, so will fail this test. super(LdapFilterTests, self).test_list_users_in_group_exact_filtered() keystone-9.0.0/keystone/tests/unit/test_v3_endpoint_policy.py0000664000567000056710000002352412701407102025724 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves import http_client from testtools import matchers from keystone.tests import unit from keystone.tests.unit import test_v3 class EndpointPolicyTestCase(test_v3.RestfulTestCase): """Test endpoint policy CRUD. In general, the controller layer of the endpoint policy extension is really just marshalling the data around the underlying manager calls. Given that the manager layer is tested in depth by the backend tests, the tests we execute here concentrate on ensuring we are correctly passing and presenting the data. 
""" def setUp(self): super(EndpointPolicyTestCase, self).setUp() self.policy = unit.new_policy_ref() self.policy_api.create_policy(self.policy['id'], self.policy) self.service = unit.new_service_ref() self.catalog_api.create_service(self.service['id'], self.service) self.endpoint = unit.new_endpoint_ref(self.service['id'], enabled=True, interface='public', region_id=self.region_id) self.catalog_api.create_endpoint(self.endpoint['id'], self.endpoint) self.region = unit.new_region_ref() self.catalog_api.create_region(self.region) def assert_head_and_get_return_same_response(self, url, expected_status): self.get(url, expected_status=expected_status) self.head(url, expected_status=expected_status) # endpoint policy crud tests def _crud_test(self, url): # Test when the resource does not exist also ensures # that there is not a false negative after creation. self.assert_head_and_get_return_same_response( url, expected_status=http_client.NOT_FOUND) self.put(url) # test that the new resource is accessible. 
self.assert_head_and_get_return_same_response( url, expected_status=http_client.NO_CONTENT) self.delete(url) # test that the deleted resource is no longer accessible self.assert_head_and_get_return_same_response( url, expected_status=http_client.NOT_FOUND) def test_crud_for_policy_for_explicit_endpoint(self): """PUT, HEAD and DELETE for explicit endpoint policy.""" url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/endpoints/%(endpoint_id)s') % { 'policy_id': self.policy['id'], 'endpoint_id': self.endpoint['id']} self._crud_test(url) def test_crud_for_policy_for_service(self): """PUT, HEAD and DELETE for service endpoint policy.""" url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s') % { 'policy_id': self.policy['id'], 'service_id': self.service['id']} self._crud_test(url) def test_crud_for_policy_for_region_and_service(self): """PUT, HEAD and DELETE for region and service endpoint policy.""" url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s/regions/%(region_id)s') % { 'policy_id': self.policy['id'], 'service_id': self.service['id'], 'region_id': self.region['id']} self._crud_test(url) def test_get_policy_for_endpoint(self): """GET /endpoints/{endpoint_id}/policy.""" self.put('/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/endpoints/%(endpoint_id)s' % { 'policy_id': self.policy['id'], 'endpoint_id': self.endpoint['id']}) self.head('/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY' '/policy' % { 'endpoint_id': self.endpoint['id']}, expected_status=http_client.OK) r = self.get('/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY' '/policy' % { 'endpoint_id': self.endpoint['id']}) self.assertValidPolicyResponse(r, ref=self.policy) def test_list_endpoints_for_policy(self): """GET /policies/%(policy_id}/endpoints.""" self.put('/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/endpoints/%(endpoint_id)s' % { 'policy_id': self.policy['id'], 'endpoint_id': self.endpoint['id']}) r = 
self.get('/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/endpoints' % { 'policy_id': self.policy['id']}) self.assertValidEndpointListResponse(r, ref=self.endpoint) self.assertThat(r.result.get('endpoints'), matchers.HasLength(1)) def test_endpoint_association_cleanup_when_endpoint_deleted(self): url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/endpoints/%(endpoint_id)s') % { 'policy_id': self.policy['id'], 'endpoint_id': self.endpoint['id']} self.put(url) self.head(url) self.delete('/endpoints/%(endpoint_id)s' % { 'endpoint_id': self.endpoint['id']}) self.head(url, expected_status=http_client.NOT_FOUND) def test_region_service_association_cleanup_when_region_deleted(self): url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s/regions/%(region_id)s') % { 'policy_id': self.policy['id'], 'service_id': self.service['id'], 'region_id': self.region['id']} self.put(url) self.head(url) self.delete('/regions/%(region_id)s' % { 'region_id': self.region['id']}) self.head(url, expected_status=http_client.NOT_FOUND) def test_region_service_association_cleanup_when_service_deleted(self): url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s/regions/%(region_id)s') % { 'policy_id': self.policy['id'], 'service_id': self.service['id'], 'region_id': self.region['id']} self.put(url) self.head(url) self.delete('/services/%(service_id)s' % { 'service_id': self.service['id']}) self.head(url, expected_status=http_client.NOT_FOUND) def test_service_association_cleanup_when_service_deleted(self): url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s') % { 'policy_id': self.policy['id'], 'service_id': self.service['id']} self.put(url) self.get(url, expected_status=http_client.NO_CONTENT) self.delete('/policies/%(policy_id)s' % { 'policy_id': self.policy['id']}) self.head(url, expected_status=http_client.NOT_FOUND) def test_service_association_cleanup_when_policy_deleted(self): url = 
('/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s') % { 'policy_id': self.policy['id'], 'service_id': self.service['id']} self.put(url) self.get(url, expected_status=http_client.NO_CONTENT) self.delete('/services/%(service_id)s' % { 'service_id': self.service['id']}) self.head(url, expected_status=http_client.NOT_FOUND) class JsonHomeTests(test_v3.JsonHomeTestMixin): EXTENSION_LOCATION = ('http://docs.openstack.org/api/openstack-identity/3/' 'ext/OS-ENDPOINT-POLICY/1.0/rel') PARAM_LOCATION = 'http://docs.openstack.org/api/openstack-identity/3/param' JSON_HOME_DATA = { EXTENSION_LOCATION + '/endpoint_policy': { 'href-template': '/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/' 'policy', 'href-vars': { 'endpoint_id': PARAM_LOCATION + '/endpoint_id', }, }, EXTENSION_LOCATION + '/policy_endpoints': { 'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'endpoints', 'href-vars': { 'policy_id': PARAM_LOCATION + '/policy_id', }, }, EXTENSION_LOCATION + '/endpoint_policy_association': { 'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'endpoints/{endpoint_id}', 'href-vars': { 'policy_id': PARAM_LOCATION + '/policy_id', 'endpoint_id': PARAM_LOCATION + '/endpoint_id', }, }, EXTENSION_LOCATION + '/service_policy_association': { 'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}', 'href-vars': { 'policy_id': PARAM_LOCATION + '/policy_id', 'service_id': PARAM_LOCATION + '/service_id', }, }, EXTENSION_LOCATION + '/region_and_service_policy_association': { 'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}/regions/{region_id}', 'href-vars': { 'policy_id': PARAM_LOCATION + '/policy_id', 'service_id': PARAM_LOCATION + '/service_id', 'region_id': PARAM_LOCATION + '/region_id', }, }, } keystone-9.0.0/keystone/tests/unit/test_v3_os_revoke.py0000664000567000056710000001211712701407102024515 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the 
"License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid from oslo_utils import timeutils import six from six.moves import http_client from testtools import matchers from keystone.common import utils from keystone.models import revoke_model from keystone.tests.unit import test_v3 from keystone.token import provider def _future_time_string(): expire_delta = datetime.timedelta(seconds=1000) future_time = timeutils.utcnow() + expire_delta return utils.isotime(future_time) class OSRevokeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin): JSON_HOME_DATA = { 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-REVOKE/1.0/' 'rel/events': { 'href': '/OS-REVOKE/events', }, } def test_get_empty_list(self): resp = self.get('/OS-REVOKE/events') self.assertEqual([], resp.json_body['events']) def _blank_event(self): return {} # The two values will be the same with the exception of # 'issued_before' which is set when the event is recorded. def assertReportedEventMatchesRecorded(self, event, sample, before_time): after_time = timeutils.utcnow() event_issued_before = timeutils.normalize_time( timeutils.parse_isotime(event['issued_before'])) self.assertTrue( before_time <= event_issued_before, 'invalid event issued_before time; %s is not later than %s.' % ( utils.isotime(event_issued_before, subsecond=True), utils.isotime(before_time, subsecond=True))) self.assertTrue( event_issued_before <= after_time, 'invalid event issued_before time; %s is not earlier than %s.' 
% ( utils.isotime(event_issued_before, subsecond=True), utils.isotime(after_time, subsecond=True))) del (event['issued_before']) self.assertEqual(sample, event) def test_revoked_list_self_url(self): revoked_list_url = '/OS-REVOKE/events' resp = self.get(revoked_list_url) links = resp.json_body['links'] self.assertThat(links['self'], matchers.EndsWith(revoked_list_url)) def test_revoked_token_in_list(self): user_id = uuid.uuid4().hex expires_at = provider.default_expire_time() sample = self._blank_event() sample['user_id'] = six.text_type(user_id) sample['expires_at'] = six.text_type(utils.isotime(expires_at)) before_time = timeutils.utcnow() self.revoke_api.revoke_by_expiration(user_id, expires_at) resp = self.get('/OS-REVOKE/events') events = resp.json_body['events'] self.assertEqual(1, len(events)) self.assertReportedEventMatchesRecorded(events[0], sample, before_time) def test_disabled_project_in_list(self): project_id = uuid.uuid4().hex sample = dict() sample['project_id'] = six.text_type(project_id) before_time = timeutils.utcnow() self.revoke_api.revoke( revoke_model.RevokeEvent(project_id=project_id)) resp = self.get('/OS-REVOKE/events') events = resp.json_body['events'] self.assertEqual(1, len(events)) self.assertReportedEventMatchesRecorded(events[0], sample, before_time) def test_disabled_domain_in_list(self): domain_id = uuid.uuid4().hex sample = dict() sample['domain_id'] = six.text_type(domain_id) before_time = timeutils.utcnow() self.revoke_api.revoke( revoke_model.RevokeEvent(domain_id=domain_id)) resp = self.get('/OS-REVOKE/events') events = resp.json_body['events'] self.assertEqual(1, len(events)) self.assertReportedEventMatchesRecorded(events[0], sample, before_time) def test_list_since_invalid(self): self.get('/OS-REVOKE/events?since=blah', expected_status=http_client.BAD_REQUEST) def test_list_since_valid(self): resp = self.get('/OS-REVOKE/events?since=2013-02-27T18:30:59.999999Z') events = resp.json_body['events'] self.assertEqual(0, 
len(events)) def test_since_future_time_no_events(self): domain_id = uuid.uuid4().hex sample = dict() sample['domain_id'] = six.text_type(domain_id) self.revoke_api.revoke( revoke_model.RevokeEvent(domain_id=domain_id)) resp = self.get('/OS-REVOKE/events') events = resp.json_body['events'] self.assertEqual(1, len(events)) resp = self.get('/OS-REVOKE/events?since=%s' % _future_time_string()) events = resp.json_body['events'] self.assertEqual([], events) keystone-9.0.0/keystone/tests/unit/token/0000775000567000056710000000000012701407246021627 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/token/test_token_data_helper.py0000664000567000056710000000445612701407102026710 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import uuid from testtools import matchers from keystone import exception from keystone.tests import unit from keystone.token.providers import common class TestTokenDataHelper(unit.TestCase): def setUp(self): super(TestTokenDataHelper, self).setUp() self.load_backends() self.v3_data_helper = common.V3TokenDataHelper() def test_v3_token_data_helper_populate_audit_info_string(self): token_data = {} audit_info_bytes = base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2] audit_info = audit_info_bytes.decode('utf-8') self.v3_data_helper._populate_audit_info(token_data, audit_info) self.assertIn(audit_info, token_data['audit_ids']) self.assertThat(token_data['audit_ids'], matchers.HasLength(2)) def test_v3_token_data_helper_populate_audit_info_none(self): token_data = {} self.v3_data_helper._populate_audit_info(token_data, audit_info=None) self.assertThat(token_data['audit_ids'], matchers.HasLength(1)) self.assertNotIn(None, token_data['audit_ids']) def test_v3_token_data_helper_populate_audit_info_list(self): token_data = {} audit_info = [base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2], base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2]] self.v3_data_helper._populate_audit_info(token_data, audit_info) self.assertEqual(audit_info, token_data['audit_ids']) def test_v3_token_data_helper_populate_audit_info_invalid(self): token_data = {} audit_info = dict() self.assertRaises(exception.UnexpectedError, self.v3_data_helper._populate_audit_info, token_data=token_data, audit_info=audit_info) keystone-9.0.0/keystone/tests/unit/token/test_fernet_provider.py0000664000567000056710000006274712701407102026444 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import datetime import hashlib import os import uuid import msgpack from oslo_utils import timeutils from six.moves import urllib from keystone.common import config from keystone.common import utils from keystone import exception from keystone.federation import constants as federation_constants from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import database from keystone.token import provider from keystone.token.providers import fernet from keystone.token.providers.fernet import token_formatters from keystone.token.providers.fernet import utils as fernet_utils CONF = config.CONF class TestFernetTokenProvider(unit.TestCase): def setUp(self): super(TestFernetTokenProvider, self).setUp() self.useFixture(ksfixtures.KeyRepository(self.config_fixture)) self.provider = fernet.Provider() def test_supports_bind_authentication_returns_false(self): self.assertFalse(self.provider._supports_bind_authentication) def test_needs_persistence_returns_false(self): self.assertFalse(self.provider.needs_persistence()) def test_invalid_v3_token_raises_token_not_found(self): # NOTE(lbragstad): Here we use the validate_non_persistent_token() # methods because the validate_v3_token() method is strictly for # validating UUID formatted tokens. It is written to assume cached # tokens from a backend, where validate_non_persistent_token() is not. 
token_id = uuid.uuid4().hex e = self.assertRaises( exception.TokenNotFound, self.provider.validate_non_persistent_token, token_id) self.assertIn(token_id, u'%s' % e) def test_invalid_v2_token_raises_token_not_found(self): token_id = uuid.uuid4().hex e = self.assertRaises( exception.TokenNotFound, self.provider.validate_non_persistent_token, token_id) self.assertIn(token_id, u'%s' % e) class TestValidate(unit.TestCase): def setUp(self): super(TestValidate, self).setUp() self.useFixture(ksfixtures.KeyRepository(self.config_fixture)) self.useFixture(database.Database()) self.load_backends() def config_overrides(self): super(TestValidate, self).config_overrides() self.config_fixture.config(group='token', provider='fernet') def test_validate_v3_token_simple(self): # Check the fields in the token result when use validate_v3_token # with a simple token. domain_ref = unit.new_domain_ref() domain_ref = self.resource_api.create_domain(domain_ref['id'], domain_ref) user_ref = unit.new_user_ref(domain_ref['id']) user_ref = self.identity_api.create_user(user_ref) method_names = ['password'] token_id, token_data_ = self.token_provider_api.issue_v3_token( user_ref['id'], method_names) token_data = self.token_provider_api.validate_v3_token(token_id) token = token_data['token'] self.assertIsInstance(token['audit_ids'], list) self.assertIsInstance(token['expires_at'], str) self.assertIsInstance(token['issued_at'], str) self.assertEqual(method_names, token['methods']) exp_user_info = { 'id': user_ref['id'], 'name': user_ref['name'], 'domain': { 'id': domain_ref['id'], 'name': domain_ref['name'], }, } self.assertEqual(exp_user_info, token['user']) def test_validate_v3_token_federated_info(self): # Check the user fields in the token result when use validate_v3_token # when the token has federated info. 
domain_ref = unit.new_domain_ref() domain_ref = self.resource_api.create_domain(domain_ref['id'], domain_ref) user_ref = unit.new_user_ref(domain_ref['id']) user_ref = self.identity_api.create_user(user_ref) method_names = ['mapped'] group_ids = [uuid.uuid4().hex, ] identity_provider = uuid.uuid4().hex protocol = uuid.uuid4().hex auth_context = { 'user_id': user_ref['id'], 'group_ids': group_ids, federation_constants.IDENTITY_PROVIDER: identity_provider, federation_constants.PROTOCOL: protocol, } token_id, token_data_ = self.token_provider_api.issue_v3_token( user_ref['id'], method_names, auth_context=auth_context) token_data = self.token_provider_api.validate_v3_token(token_id) token = token_data['token'] exp_user_info = { 'id': user_ref['id'], 'name': user_ref['id'], 'domain': {'id': CONF.federation.federated_domain_name, 'name': CONF.federation.federated_domain_name, }, federation_constants.FEDERATION: { 'groups': [{'id': group_id} for group_id in group_ids], 'identity_provider': {'id': identity_provider, }, 'protocol': {'id': protocol, }, }, } self.assertEqual(exp_user_info, token['user']) def test_validate_v3_token_trust(self): # Check the trust fields in the token result when use validate_v3_token # when the token has trust info. 
domain_ref = unit.new_domain_ref() domain_ref = self.resource_api.create_domain(domain_ref['id'], domain_ref) user_ref = unit.new_user_ref(domain_ref['id']) user_ref = self.identity_api.create_user(user_ref) trustor_user_ref = unit.new_user_ref(domain_ref['id']) trustor_user_ref = self.identity_api.create_user(trustor_user_ref) project_ref = unit.new_project_ref(domain_id=domain_ref['id']) project_ref = self.resource_api.create_project(project_ref['id'], project_ref) role_ref = unit.new_role_ref() role_ref = self.role_api.create_role(role_ref['id'], role_ref) self.assignment_api.create_grant( role_ref['id'], user_id=user_ref['id'], project_id=project_ref['id']) self.assignment_api.create_grant( role_ref['id'], user_id=trustor_user_ref['id'], project_id=project_ref['id']) trustor_user_id = trustor_user_ref['id'] trustee_user_id = user_ref['id'] trust_ref = unit.new_trust_ref( trustor_user_id, trustee_user_id, project_id=project_ref['id'], role_ids=[role_ref['id'], ]) trust_ref = self.trust_api.create_trust(trust_ref['id'], trust_ref, trust_ref['roles']) method_names = ['password'] token_id, token_data_ = self.token_provider_api.issue_v3_token( user_ref['id'], method_names, project_id=project_ref['id'], trust=trust_ref) token_data = self.token_provider_api.validate_v3_token(token_id) token = token_data['token'] exp_trust_info = { 'id': trust_ref['id'], 'impersonation': False, 'trustee_user': {'id': user_ref['id'], }, 'trustor_user': {'id': trustor_user_ref['id'], }, } self.assertEqual(exp_trust_info, token['OS-TRUST:trust']) def test_validate_v3_token_validation_error_exc(self): # When the token format isn't recognized, TokenNotFound is raised. # A uuid string isn't a valid Fernet token. 
token_id = uuid.uuid4().hex self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v3_token, token_id) class TestTokenFormatter(unit.TestCase): def setUp(self): super(TestTokenFormatter, self).setUp() self.useFixture(ksfixtures.KeyRepository(self.config_fixture)) def test_restore_padding(self): # 'a' will result in '==' padding, 'aa' will result in '=' padding, and # 'aaa' will result in no padding. binary_to_test = [b'a', b'aa', b'aaa'] for binary in binary_to_test: # base64.urlsafe_b64encode takes six.binary_type and returns # six.binary_type. encoded_string = base64.urlsafe_b64encode(binary) encoded_string = encoded_string.decode('utf-8') # encoded_string is now six.text_type. encoded_str_without_padding = encoded_string.rstrip('=') self.assertFalse(encoded_str_without_padding.endswith('=')) encoded_str_with_padding_restored = ( token_formatters.TokenFormatter.restore_padding( encoded_str_without_padding) ) self.assertEqual(encoded_string, encoded_str_with_padding_restored) def test_legacy_padding_validation(self): first_value = uuid.uuid4().hex second_value = uuid.uuid4().hex payload = (first_value, second_value) msgpack_payload = msgpack.packb(payload) # msgpack_payload is six.binary_type. tf = token_formatters.TokenFormatter() # NOTE(lbragstad): This method preserves the way that keystone used to # percent encode the tokens, prior to bug #1491926. def legacy_pack(payload): # payload is six.binary_type. encrypted_payload = tf.crypto.encrypt(payload) # encrypted_payload is six.binary_type. # the encrypted_payload is returned with padding appended self.assertTrue(encrypted_payload.endswith(b'=')) # using urllib.parse.quote will percent encode the padding, like # keystone did in Kilo. percent_encoded_payload = urllib.parse.quote(encrypted_payload) # percent_encoded_payload is six.text_type. 
# ensure that the padding was actually percent encoded self.assertTrue(percent_encoded_payload.endswith('%3D')) return percent_encoded_payload token_with_legacy_padding = legacy_pack(msgpack_payload) # token_with_legacy_padding is six.text_type. # demonstrate the we can validate a payload that has been percent # encoded with the Fernet logic that existed in Kilo serialized_payload = tf.unpack(token_with_legacy_padding) # serialized_payload is six.binary_type. returned_payload = msgpack.unpackb(serialized_payload) # returned_payload contains six.binary_type. self.assertEqual(first_value, returned_payload[0].decode('utf-8')) self.assertEqual(second_value, returned_payload[1].decode('utf-8')) class TestPayloads(unit.TestCase): def assertTimestampsEqual(self, expected, actual): # The timestamp that we get back when parsing the payload may not # exactly match the timestamp that was put in the payload due to # conversion to and from a float. exp_time = timeutils.parse_isotime(expected) actual_time = timeutils.parse_isotime(actual) # the granularity of timestamp string is microseconds and it's only the # last digit in the representation that's different, so use a delta # just above nanoseconds. 
return self.assertCloseEnoughForGovernmentWork(exp_time, actual_time, delta=1e-05) def test_uuid_hex_to_byte_conversions(self): payload_cls = token_formatters.BasePayload expected_hex_uuid = uuid.uuid4().hex uuid_obj = uuid.UUID(expected_hex_uuid) expected_uuid_in_bytes = uuid_obj.bytes actual_uuid_in_bytes = payload_cls.convert_uuid_hex_to_bytes( expected_hex_uuid) self.assertEqual(expected_uuid_in_bytes, actual_uuid_in_bytes) actual_hex_uuid = payload_cls.convert_uuid_bytes_to_hex( expected_uuid_in_bytes) self.assertEqual(expected_hex_uuid, actual_hex_uuid) def test_time_string_to_float_conversions(self): payload_cls = token_formatters.BasePayload original_time_str = utils.isotime(subsecond=True) time_obj = timeutils.parse_isotime(original_time_str) expected_time_float = ( (timeutils.normalize_time(time_obj) - datetime.datetime.utcfromtimestamp(0)).total_seconds()) # NOTE(lbragstad): The token expiration time for Fernet tokens is # passed in the payload of the token. This is different from the token # creation time, which is handled by Fernet and doesn't support # subsecond precision because it is a timestamp integer. self.assertIsInstance(expected_time_float, float) actual_time_float = payload_cls._convert_time_string_to_float( original_time_str) self.assertIsInstance(actual_time_float, float) self.assertEqual(expected_time_float, actual_time_float) # Generate expected_time_str using the same time float. Using # original_time_str from utils.isotime will occasionally fail due to # floating point rounding differences. 
time_object = datetime.datetime.utcfromtimestamp(actual_time_float) expected_time_str = utils.isotime(time_object, subsecond=True) actual_time_str = payload_cls._convert_float_to_time_string( actual_time_float) self.assertEqual(expected_time_str, actual_time_str) def _test_payload(self, payload_class, exp_user_id=None, exp_methods=None, exp_project_id=None, exp_domain_id=None, exp_trust_id=None, exp_federated_info=None, exp_access_token_id=None): exp_user_id = exp_user_id or uuid.uuid4().hex exp_methods = exp_methods or ['password'] exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True) exp_audit_ids = [provider.random_urlsafe_str()] payload = payload_class.assemble( exp_user_id, exp_methods, exp_project_id, exp_domain_id, exp_expires_at, exp_audit_ids, exp_trust_id, exp_federated_info, exp_access_token_id) (user_id, methods, project_id, domain_id, expires_at, audit_ids, trust_id, federated_info, access_token_id) = payload_class.disassemble(payload) self.assertEqual(exp_user_id, user_id) self.assertEqual(exp_methods, methods) self.assertTimestampsEqual(exp_expires_at, expires_at) self.assertEqual(exp_audit_ids, audit_ids) self.assertEqual(exp_project_id, project_id) self.assertEqual(exp_domain_id, domain_id) self.assertEqual(exp_trust_id, trust_id) self.assertEqual(exp_access_token_id, access_token_id) if exp_federated_info: self.assertDictEqual(exp_federated_info, federated_info) else: self.assertIsNone(federated_info) def test_unscoped_payload(self): self._test_payload(token_formatters.UnscopedPayload) def test_project_scoped_payload(self): self._test_payload(token_formatters.ProjectScopedPayload, exp_project_id=uuid.uuid4().hex) def test_domain_scoped_payload(self): self._test_payload(token_formatters.DomainScopedPayload, exp_domain_id=uuid.uuid4().hex) def test_domain_scoped_payload_with_default_domain(self): self._test_payload(token_formatters.DomainScopedPayload, exp_domain_id=CONF.identity.default_domain_id) def test_trust_scoped_payload(self): 
self._test_payload(token_formatters.TrustScopedPayload, exp_project_id=uuid.uuid4().hex, exp_trust_id=uuid.uuid4().hex) def test_unscoped_payload_with_non_uuid_user_id(self): self._test_payload(token_formatters.UnscopedPayload, exp_user_id='someNonUuidUserId') def test_unscoped_payload_with_16_char_non_uuid_user_id(self): self._test_payload(token_formatters.UnscopedPayload, exp_user_id='0123456789abcdef') def test_project_scoped_payload_with_non_uuid_ids(self): self._test_payload(token_formatters.ProjectScopedPayload, exp_user_id='someNonUuidUserId', exp_project_id='someNonUuidProjectId') def test_project_scoped_payload_with_16_char_non_uuid_ids(self): self._test_payload(token_formatters.ProjectScopedPayload, exp_user_id='0123456789abcdef', exp_project_id='0123456789abcdef') def test_domain_scoped_payload_with_non_uuid_user_id(self): self._test_payload(token_formatters.DomainScopedPayload, exp_user_id='nonUuidUserId', exp_domain_id=uuid.uuid4().hex) def test_domain_scoped_payload_with_16_char_non_uuid_user_id(self): self._test_payload(token_formatters.DomainScopedPayload, exp_user_id='0123456789abcdef', exp_domain_id=uuid.uuid4().hex) def test_trust_scoped_payload_with_non_uuid_ids(self): self._test_payload(token_formatters.TrustScopedPayload, exp_user_id='someNonUuidUserId', exp_project_id='someNonUuidProjectId', exp_trust_id=uuid.uuid4().hex) def test_trust_scoped_payload_with_16_char_non_uuid_ids(self): self._test_payload(token_formatters.TrustScopedPayload, exp_user_id='0123456789abcdef', exp_project_id='0123456789abcdef', exp_trust_id=uuid.uuid4().hex) def _test_federated_payload_with_ids(self, exp_user_id, exp_group_id): exp_federated_info = {'group_ids': [{'id': exp_group_id}], 'idp_id': uuid.uuid4().hex, 'protocol_id': uuid.uuid4().hex} self._test_payload(token_formatters.FederatedUnscopedPayload, exp_user_id=exp_user_id, exp_federated_info=exp_federated_info) def test_federated_payload_with_non_uuid_ids(self): 
self._test_federated_payload_with_ids('someNonUuidUserId', 'someNonUuidGroupId') def test_federated_payload_with_16_char_non_uuid_ids(self): self._test_federated_payload_with_ids('0123456789abcdef', '0123456789abcdef') def test_federated_project_scoped_payload(self): exp_federated_info = {'group_ids': [{'id': 'someNonUuidGroupId'}], 'idp_id': uuid.uuid4().hex, 'protocol_id': uuid.uuid4().hex} self._test_payload(token_formatters.FederatedProjectScopedPayload, exp_user_id='someNonUuidUserId', exp_methods=['token'], exp_project_id=uuid.uuid4().hex, exp_federated_info=exp_federated_info) def test_federated_domain_scoped_payload(self): exp_federated_info = {'group_ids': [{'id': 'someNonUuidGroupId'}], 'idp_id': uuid.uuid4().hex, 'protocol_id': uuid.uuid4().hex} self._test_payload(token_formatters.FederatedDomainScopedPayload, exp_user_id='someNonUuidUserId', exp_methods=['token'], exp_domain_id=uuid.uuid4().hex, exp_federated_info=exp_federated_info) def test_oauth_scoped_payload(self): self._test_payload(token_formatters.OauthScopedPayload, exp_project_id=uuid.uuid4().hex, exp_access_token_id=uuid.uuid4().hex) class TestFernetKeyRotation(unit.TestCase): def setUp(self): super(TestFernetKeyRotation, self).setUp() # A collection of all previously-seen signatures of the key # repository's contents. self.key_repo_signatures = set() @property def keys(self): """Key files converted to numbers.""" return sorted( int(x) for x in os.listdir(CONF.fernet_tokens.key_repository)) @property def key_repository_size(self): """The number of keys in the key repository.""" return len(self.keys) @property def key_repository_signature(self): """Create a "thumbprint" of the current key repository. Because key files are renamed, this produces a hash of the contents of the key files, ignoring their filenames. 
The resulting signature can be used, for example, to ensure that you have a unique set of keys after you perform a key rotation (taking a static set of keys, and simply shuffling them, would fail such a test). """ # Load the keys into a list, keys is list of six.text_type. keys = fernet_utils.load_keys() # Sort the list of keys by the keys themselves (they were previously # sorted by filename). keys.sort() # Create the thumbprint using all keys in the repository. signature = hashlib.sha1() for key in keys: # Need to convert key to six.binary_type for update. signature.update(key.encode('utf-8')) return signature.hexdigest() def assertRepositoryState(self, expected_size): """Validate the state of the key repository.""" self.assertEqual(expected_size, self.key_repository_size) self.assertUniqueRepositoryState() def assertUniqueRepositoryState(self): """Ensures that the current key repo state has not been seen before.""" # This is assigned to a variable because it takes some work to # calculate. signature = self.key_repository_signature # Ensure the signature is not in the set of previously seen signatures. self.assertNotIn(signature, self.key_repo_signatures) # Add the signature to the set of repository signatures to validate # that we don't see it again later. self.key_repo_signatures.add(signature) def test_rotation(self): # Initializing a key repository results in this many keys. We don't # support max_active_keys being set any lower. min_active_keys = 2 # Simulate every rotation strategy up to "rotating once a week while # maintaining a year's worth of keys." for max_active_keys in range(min_active_keys, 52 + 1): self.config_fixture.config(group='fernet_tokens', max_active_keys=max_active_keys) # Ensure that resetting the key repository always results in 2 # active keys. self.useFixture(ksfixtures.KeyRepository(self.config_fixture)) # Validate the initial repository state. 
self.assertRepositoryState(expected_size=min_active_keys) # The repository should be initialized with a staged key (0) and a # primary key (1). The next key is just auto-incremented. exp_keys = [0, 1] next_key_number = exp_keys[-1] + 1 # keep track of next key self.assertEqual(exp_keys, self.keys) # Rotate the keys just enough times to fully populate the key # repository. for rotation in range(max_active_keys - min_active_keys): fernet_utils.rotate_keys() self.assertRepositoryState(expected_size=rotation + 3) exp_keys.append(next_key_number) next_key_number += 1 self.assertEqual(exp_keys, self.keys) # We should have a fully populated key repository now. self.assertEqual(max_active_keys, self.key_repository_size) # Rotate an additional number of times to ensure that we maintain # the desired number of active keys. for rotation in range(10): fernet_utils.rotate_keys() self.assertRepositoryState(expected_size=max_active_keys) exp_keys.pop(1) exp_keys.append(next_key_number) next_key_number += 1 self.assertEqual(exp_keys, self.keys) def test_non_numeric_files(self): self.useFixture(ksfixtures.KeyRepository(self.config_fixture)) evil_file = os.path.join(CONF.fernet_tokens.key_repository, '99.bak') with open(evil_file, 'w'): pass fernet_utils.rotate_keys() self.assertTrue(os.path.isfile(evil_file)) keys = 0 for x in os.listdir(CONF.fernet_tokens.key_repository): if x == '99.bak': continue keys += 1 self.assertEqual(3, keys) class TestLoadKeys(unit.TestCase): def test_non_numeric_files(self): self.useFixture(ksfixtures.KeyRepository(self.config_fixture)) evil_file = os.path.join(CONF.fernet_tokens.key_repository, '~1') with open(evil_file, 'w'): pass keys = fernet_utils.load_keys() self.assertEqual(2, len(keys)) self.assertTrue(len(keys[0])) keystone-9.0.0/keystone/tests/unit/token/test_uuid_provider.py0000664000567000056710000000177712701407102026123 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use 
this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.tests import unit from keystone.token.providers import uuid class TestUuidTokenProvider(unit.TestCase): def setUp(self): super(TestUuidTokenProvider, self).setUp() self.provider = uuid.Provider() def test_supports_bind_authentication_returns_true(self): self.assertTrue(self.provider._supports_bind_authentication) def test_need_persistence_return_true(self): self.assertIs(True, self.provider.needs_persistence()) keystone-9.0.0/keystone/tests/unit/token/__init__.py0000664000567000056710000000000012701407102023715 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/token/test_pkiz_provider.py0000664000567000056710000000177712701407102026132 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.tests import unit from keystone.token.providers import pkiz class TestPkizTokenProvider(unit.TestCase): def setUp(self): super(TestPkizTokenProvider, self).setUp() self.provider = pkiz.Provider() def test_supports_bind_authentication_returns_true(self): self.assertTrue(self.provider._supports_bind_authentication) def test_need_persistence_return_true(self): self.assertIs(True, self.provider.needs_persistence()) keystone-9.0.0/keystone/tests/unit/token/test_token_model.py0000664000567000056710000003074112701407102025534 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import uuid from oslo_config import cfg from oslo_utils import timeutils from six.moves import range from keystone import exception from keystone.federation import constants as federation_constants from keystone.models import token_model from keystone.tests.unit import core from keystone.tests.unit import test_token_provider CONF = cfg.CONF class TestKeystoneTokenModel(core.TestCase): def setUp(self): super(TestKeystoneTokenModel, self).setUp() self.v2_sample_token = copy.deepcopy( test_token_provider.SAMPLE_V2_TOKEN) self.v3_sample_token = copy.deepcopy( test_token_provider.SAMPLE_V3_TOKEN) def test_token_model_v3(self): token_data = token_model.KeystoneToken(uuid.uuid4().hex, self.v3_sample_token) self.assertIs(token_model.V3, token_data.version) expires = timeutils.normalize_time(timeutils.parse_isotime( self.v3_sample_token['token']['expires_at'])) issued = timeutils.normalize_time(timeutils.parse_isotime( self.v3_sample_token['token']['issued_at'])) self.assertEqual(expires, token_data.expires) self.assertEqual(issued, token_data.issued) self.assertEqual(self.v3_sample_token['token']['user']['id'], token_data.user_id) self.assertEqual(self.v3_sample_token['token']['user']['name'], token_data.user_name) self.assertEqual(self.v3_sample_token['token']['user']['domain']['id'], token_data.user_domain_id) self.assertEqual( self.v3_sample_token['token']['user']['domain']['name'], token_data.user_domain_name) self.assertEqual( self.v3_sample_token['token']['project']['domain']['id'], token_data.project_domain_id) self.assertEqual( self.v3_sample_token['token']['project']['domain']['name'], token_data.project_domain_name) self.assertEqual(self.v3_sample_token['token']['OS-TRUST:trust']['id'], token_data.trust_id) self.assertEqual( self.v3_sample_token['token']['OS-TRUST:trust']['trustor_user_id'], token_data.trustor_user_id) self.assertEqual( self.v3_sample_token['token']['OS-TRUST:trust']['trustee_user_id'], token_data.trustee_user_id) # Project Scoped 
Token self.assertRaises(exception.UnexpectedError, getattr, token_data, 'domain_id') self.assertRaises(exception.UnexpectedError, getattr, token_data, 'domain_name') self.assertFalse(token_data.domain_scoped) self.assertEqual(self.v3_sample_token['token']['project']['id'], token_data.project_id) self.assertEqual(self.v3_sample_token['token']['project']['name'], token_data.project_name) self.assertTrue(token_data.project_scoped) self.assertTrue(token_data.scoped) self.assertTrue(token_data.trust_scoped) self.assertEqual( [r['id'] for r in self.v3_sample_token['token']['roles']], token_data.role_ids) self.assertEqual( [r['name'] for r in self.v3_sample_token['token']['roles']], token_data.role_names) token_data.pop('project') self.assertFalse(token_data.project_scoped) self.assertFalse(token_data.scoped) self.assertRaises(exception.UnexpectedError, getattr, token_data, 'project_id') self.assertRaises(exception.UnexpectedError, getattr, token_data, 'project_name') self.assertFalse(token_data.project_scoped) domain_id = uuid.uuid4().hex domain_name = uuid.uuid4().hex token_data['domain'] = {'id': domain_id, 'name': domain_name} self.assertEqual(domain_id, token_data.domain_id) self.assertEqual(domain_name, token_data.domain_name) self.assertTrue(token_data.domain_scoped) token_data['audit_ids'] = [uuid.uuid4().hex] self.assertEqual(token_data.audit_id, token_data['audit_ids'][0]) self.assertEqual(token_data.audit_chain_id, token_data['audit_ids'][0]) token_data['audit_ids'].append(uuid.uuid4().hex) self.assertEqual(token_data.audit_chain_id, token_data['audit_ids'][1]) del token_data['audit_ids'] self.assertIsNone(token_data.audit_id) self.assertIsNone(token_data.audit_chain_id) def test_token_model_v3_federated_user(self): token_data = token_model.KeystoneToken(token_id=uuid.uuid4().hex, token_data=self.v3_sample_token) federation_data = {'identity_provider': {'id': uuid.uuid4().hex}, 'protocol': {'id': 'saml2'}, 'groups': [{'id': uuid.uuid4().hex} for x in range(1, 
5)]} self.assertFalse(token_data.is_federated_user) self.assertEqual([], token_data.federation_group_ids) self.assertIsNone(token_data.federation_protocol_id) self.assertIsNone(token_data.federation_idp_id) token_data['user'][federation_constants.FEDERATION] = federation_data self.assertTrue(token_data.is_federated_user) self.assertEqual([x['id'] for x in federation_data['groups']], token_data.federation_group_ids) self.assertEqual(federation_data['protocol']['id'], token_data.federation_protocol_id) self.assertEqual(federation_data['identity_provider']['id'], token_data.federation_idp_id) def test_token_model_v2_federated_user(self): token_data = token_model.KeystoneToken(token_id=uuid.uuid4().hex, token_data=self.v2_sample_token) federation_data = {'identity_provider': {'id': uuid.uuid4().hex}, 'protocol': {'id': 'saml2'}, 'groups': [{'id': uuid.uuid4().hex} for x in range(1, 5)]} self.assertFalse(token_data.is_federated_user) self.assertEqual([], token_data.federation_group_ids) self.assertIsNone(token_data.federation_protocol_id) self.assertIsNone(token_data.federation_idp_id) token_data['user'][federation_constants.FEDERATION] = federation_data # Federated users should not exist in V2, the data should remain empty self.assertFalse(token_data.is_federated_user) self.assertEqual([], token_data.federation_group_ids) self.assertIsNone(token_data.federation_protocol_id) self.assertIsNone(token_data.federation_idp_id) def test_token_model_v2(self): token_data = token_model.KeystoneToken(uuid.uuid4().hex, self.v2_sample_token) self.assertIs(token_model.V2, token_data.version) expires = timeutils.normalize_time(timeutils.parse_isotime( self.v2_sample_token['access']['token']['expires'])) issued = timeutils.normalize_time(timeutils.parse_isotime( self.v2_sample_token['access']['token']['issued_at'])) self.assertEqual(expires, token_data.expires) self.assertEqual(issued, token_data.issued) self.assertEqual(self.v2_sample_token['access']['user']['id'], 
token_data.user_id) self.assertEqual(self.v2_sample_token['access']['user']['name'], token_data.user_name) self.assertEqual(CONF.identity.default_domain_id, token_data.user_domain_id) self.assertEqual('Default', token_data.user_domain_name) self.assertEqual(CONF.identity.default_domain_id, token_data.project_domain_id) self.assertEqual('Default', token_data.project_domain_name) self.assertEqual(self.v2_sample_token['access']['trust']['id'], token_data.trust_id) self.assertEqual( self.v2_sample_token['access']['trust']['trustor_user_id'], token_data.trustor_user_id) self.assertEqual( self.v2_sample_token['access']['trust']['impersonation'], token_data.trust_impersonation) self.assertEqual( self.v2_sample_token['access']['trust']['trustee_user_id'], token_data.trustee_user_id) # Project Scoped Token self.assertEqual( self.v2_sample_token['access']['token']['tenant']['id'], token_data.project_id) self.assertEqual( self.v2_sample_token['access']['token']['tenant']['name'], token_data.project_name) self.assertTrue(token_data.project_scoped) self.assertTrue(token_data.scoped) self.assertTrue(token_data.trust_scoped) self.assertEqual( [r['name'] for r in self.v2_sample_token['access']['user']['roles']], token_data.role_names) token_data['token'].pop('tenant') self.assertFalse(token_data.scoped) self.assertFalse(token_data.project_scoped) self.assertFalse(token_data.domain_scoped) self.assertRaises(exception.UnexpectedError, getattr, token_data, 'project_id') self.assertRaises(exception.UnexpectedError, getattr, token_data, 'project_name') self.assertRaises(exception.UnexpectedError, getattr, token_data, 'project_domain_id') self.assertRaises(exception.UnexpectedError, getattr, token_data, 'project_domain_id') # No Domain Scoped tokens in V2 self.assertRaises(NotImplementedError, getattr, token_data, 'domain_id') self.assertRaises(NotImplementedError, getattr, token_data, 'domain_name') token_data['domain'] = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} 
self.assertRaises(NotImplementedError, getattr, token_data, 'domain_id') self.assertRaises(NotImplementedError, getattr, token_data, 'domain_name') self.assertFalse(token_data.domain_scoped) token_data['token']['audit_ids'] = [uuid.uuid4().hex] self.assertEqual(token_data.audit_chain_id, token_data['token']['audit_ids'][0]) token_data['token']['audit_ids'].append(uuid.uuid4().hex) self.assertEqual(token_data.audit_chain_id, token_data['token']['audit_ids'][1]) self.assertEqual(token_data.audit_id, token_data['token']['audit_ids'][0]) del token_data['token']['audit_ids'] self.assertIsNone(token_data.audit_id) self.assertIsNone(token_data.audit_chain_id) def test_token_model_unknown(self): self.assertRaises(exception.UnsupportedTokenVersionException, token_model.KeystoneToken, token_id=uuid.uuid4().hex, token_data={'bogus_data': uuid.uuid4().hex}) def test_token_model_dual_scoped_token(self): domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.v2_sample_token['access']['domain'] = domain self.v3_sample_token['token']['domain'] = domain # V2 Tokens Cannot be domain scoped, this should work token_model.KeystoneToken(token_id=uuid.uuid4().hex, token_data=self.v2_sample_token) self.assertRaises(exception.UnexpectedError, token_model.KeystoneToken, token_id=uuid.uuid4().hex, token_data=self.v3_sample_token) keystone-9.0.0/keystone/tests/unit/token/test_backends.py0000664000567000056710000006000712701407102025004 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import hashlib import uuid from keystoneclient.common import cms from oslo_config import cfg from oslo_utils import timeutils import six from six.moves import range from keystone import exception from keystone.tests import unit from keystone.tests.unit import utils as test_utils from keystone.token import provider CONF = cfg.CONF NULL_OBJECT = object() class TokenTests(object): def _create_token_id(self): # Use a token signed by the cms module token_id = "" for i in range(1, 20): token_id += uuid.uuid4().hex return cms.cms_sign_token(token_id, CONF.signing.certfile, CONF.signing.keyfile) def _assert_revoked_token_list_matches_token_persistence( self, revoked_token_id_list): # Assert that the list passed in matches the list returned by the # token persistence service persistence_list = [ x['id'] for x in self.token_provider_api.list_revoked_tokens() ] self.assertEqual(persistence_list, revoked_token_id_list) def test_token_crud(self): token_id = self._create_token_id() data = {'id': token_id, 'a': 'b', 'trust_id': None, 'user': {'id': 'testuserid'}, 'token_data': {'access': {'token': { 'audit_ids': [uuid.uuid4().hex]}}}} data_ref = self.token_provider_api._persistence.create_token(token_id, data) expires = data_ref.pop('expires') data_ref.pop('user_id') self.assertIsInstance(expires, datetime.datetime) data_ref.pop('id') data.pop('id') self.assertDictEqual(data, data_ref) new_data_ref = self.token_provider_api._persistence.get_token(token_id) expires = new_data_ref.pop('expires') self.assertIsInstance(expires, datetime.datetime) new_data_ref.pop('user_id') new_data_ref.pop('id') self.assertEqual(data, new_data_ref) self.token_provider_api._persistence.delete_token(token_id) self.assertRaises( exception.TokenNotFound, self.token_provider_api._persistence.get_token, token_id) self.assertRaises( exception.TokenNotFound, 
self.token_provider_api._persistence.delete_token, token_id) def create_token_sample_data(self, token_id=None, tenant_id=None, trust_id=None, user_id=None, expires=None): if token_id is None: token_id = self._create_token_id() if user_id is None: user_id = 'testuserid' # FIXME(morganfainberg): These tokens look nothing like "Real" tokens. # This should be fixed when token issuance is cleaned up. data = {'id': token_id, 'a': 'b', 'user': {'id': user_id}, 'access': {'token': {'audit_ids': [uuid.uuid4().hex]}}} if tenant_id is not None: data['tenant'] = {'id': tenant_id, 'name': tenant_id} if tenant_id is NULL_OBJECT: data['tenant'] = None if expires is not None: data['expires'] = expires if trust_id is not None: data['trust_id'] = trust_id data['access'].setdefault('trust', {}) # Testuserid2 is used here since a trustee will be different in # the cases of impersonation and therefore should not match the # token's user_id. data['access']['trust']['trustee_user_id'] = 'testuserid2' data['token_version'] = provider.V2 # Issue token stores a copy of all token data at token['token_data']. # This emulates that assumption as part of the test. 
data['token_data'] = copy.deepcopy(data) new_token = self.token_provider_api._persistence.create_token(token_id, data) return new_token['id'], data def test_delete_tokens(self): tokens = self.token_provider_api._persistence._list_tokens( 'testuserid') self.assertEqual(0, len(tokens)) token_id1, data = self.create_token_sample_data( tenant_id='testtenantid') token_id2, data = self.create_token_sample_data( tenant_id='testtenantid') token_id3, data = self.create_token_sample_data( tenant_id='testtenantid', user_id='testuserid1') tokens = self.token_provider_api._persistence._list_tokens( 'testuserid') self.assertEqual(2, len(tokens)) self.assertIn(token_id2, tokens) self.assertIn(token_id1, tokens) self.token_provider_api._persistence.delete_tokens( user_id='testuserid', tenant_id='testtenantid') tokens = self.token_provider_api._persistence._list_tokens( 'testuserid') self.assertEqual(0, len(tokens)) self.assertRaises(exception.TokenNotFound, self.token_provider_api._persistence.get_token, token_id1) self.assertRaises(exception.TokenNotFound, self.token_provider_api._persistence.get_token, token_id2) self.token_provider_api._persistence.get_token(token_id3) def test_delete_tokens_trust(self): tokens = self.token_provider_api._persistence._list_tokens( user_id='testuserid') self.assertEqual(0, len(tokens)) token_id1, data = self.create_token_sample_data( tenant_id='testtenantid', trust_id='testtrustid') token_id2, data = self.create_token_sample_data( tenant_id='testtenantid', user_id='testuserid1', trust_id='testtrustid1') tokens = self.token_provider_api._persistence._list_tokens( 'testuserid') self.assertEqual(1, len(tokens)) self.assertIn(token_id1, tokens) self.token_provider_api._persistence.delete_tokens( user_id='testuserid', tenant_id='testtenantid', trust_id='testtrustid') self.assertRaises(exception.TokenNotFound, self.token_provider_api._persistence.get_token, token_id1) self.token_provider_api._persistence.get_token(token_id2) def _test_token_list(self, 
token_list_fn): tokens = token_list_fn('testuserid') self.assertEqual(0, len(tokens)) token_id1, data = self.create_token_sample_data() tokens = token_list_fn('testuserid') self.assertEqual(1, len(tokens)) self.assertIn(token_id1, tokens) token_id2, data = self.create_token_sample_data() tokens = token_list_fn('testuserid') self.assertEqual(2, len(tokens)) self.assertIn(token_id2, tokens) self.assertIn(token_id1, tokens) self.token_provider_api._persistence.delete_token(token_id1) tokens = token_list_fn('testuserid') self.assertIn(token_id2, tokens) self.assertNotIn(token_id1, tokens) self.token_provider_api._persistence.delete_token(token_id2) tokens = token_list_fn('testuserid') self.assertNotIn(token_id2, tokens) self.assertNotIn(token_id1, tokens) # tenant-specific tokens tenant1 = uuid.uuid4().hex tenant2 = uuid.uuid4().hex token_id3, data = self.create_token_sample_data(tenant_id=tenant1) token_id4, data = self.create_token_sample_data(tenant_id=tenant2) # test for existing but empty tenant (LP:1078497) token_id5, data = self.create_token_sample_data(tenant_id=NULL_OBJECT) tokens = token_list_fn('testuserid') self.assertEqual(3, len(tokens)) self.assertNotIn(token_id1, tokens) self.assertNotIn(token_id2, tokens) self.assertIn(token_id3, tokens) self.assertIn(token_id4, tokens) self.assertIn(token_id5, tokens) tokens = token_list_fn('testuserid', tenant2) self.assertEqual(1, len(tokens)) self.assertNotIn(token_id1, tokens) self.assertNotIn(token_id2, tokens) self.assertNotIn(token_id3, tokens) self.assertIn(token_id4, tokens) def test_token_list(self): self._test_token_list( self.token_provider_api._persistence._list_tokens) def test_token_list_trust(self): trust_id = uuid.uuid4().hex token_id5, data = self.create_token_sample_data(trust_id=trust_id) tokens = self.token_provider_api._persistence._list_tokens( 'testuserid', trust_id=trust_id) self.assertEqual(1, len(tokens)) self.assertIn(token_id5, tokens) def test_get_token_returns_not_found(self): 
self.assertRaises(exception.TokenNotFound, self.token_provider_api._persistence.get_token, uuid.uuid4().hex) def test_delete_token_returns_not_found(self): self.assertRaises(exception.TokenNotFound, self.token_provider_api._persistence.delete_token, uuid.uuid4().hex) def test_expired_token(self): token_id = uuid.uuid4().hex expire_time = timeutils.utcnow() - datetime.timedelta(minutes=1) data = {'id_hash': token_id, 'id': token_id, 'a': 'b', 'expires': expire_time, 'trust_id': None, 'user': {'id': 'testuserid'}} data_ref = self.token_provider_api._persistence.create_token(token_id, data) data_ref.pop('user_id') self.assertDictEqual(data, data_ref) self.assertRaises(exception.TokenNotFound, self.token_provider_api._persistence.get_token, token_id) def test_null_expires_token(self): token_id = uuid.uuid4().hex data = {'id': token_id, 'id_hash': token_id, 'a': 'b', 'expires': None, 'user': {'id': 'testuserid'}} data_ref = self.token_provider_api._persistence.create_token(token_id, data) self.assertIsNotNone(data_ref['expires']) new_data_ref = self.token_provider_api._persistence.get_token(token_id) # MySQL doesn't store microseconds, so discard them before testing data_ref['expires'] = data_ref['expires'].replace(microsecond=0) new_data_ref['expires'] = new_data_ref['expires'].replace( microsecond=0) self.assertEqual(data_ref, new_data_ref) def check_list_revoked_tokens(self, token_infos): revocation_list = self.token_provider_api.list_revoked_tokens() revoked_ids = [x['id'] for x in revocation_list] revoked_audit_ids = [x['audit_id'] for x in revocation_list] self._assert_revoked_token_list_matches_token_persistence(revoked_ids) for token_id, audit_id in token_infos: self.assertIn(token_id, revoked_ids) self.assertIn(audit_id, revoked_audit_ids) def delete_token(self): token_id = uuid.uuid4().hex audit_id = uuid.uuid4().hex data = {'id_hash': token_id, 'id': token_id, 'a': 'b', 'user': {'id': 'testuserid'}, 'token_data': {'token': {'audit_ids': [audit_id]}}} data_ref 
= self.token_provider_api._persistence.create_token(token_id, data) self.token_provider_api._persistence.delete_token(token_id) self.assertRaises( exception.TokenNotFound, self.token_provider_api._persistence.get_token, data_ref['id']) self.assertRaises( exception.TokenNotFound, self.token_provider_api._persistence.delete_token, data_ref['id']) return (token_id, audit_id) def test_list_revoked_tokens_returns_empty_list(self): revoked_ids = [x['id'] for x in self.token_provider_api.list_revoked_tokens()] self._assert_revoked_token_list_matches_token_persistence(revoked_ids) self.assertEqual([], revoked_ids) def test_list_revoked_tokens_for_single_token(self): self.check_list_revoked_tokens([self.delete_token()]) def test_list_revoked_tokens_for_multiple_tokens(self): self.check_list_revoked_tokens([self.delete_token() for x in range(2)]) def test_flush_expired_token(self): token_id = uuid.uuid4().hex expire_time = timeutils.utcnow() - datetime.timedelta(minutes=1) data = {'id_hash': token_id, 'id': token_id, 'a': 'b', 'expires': expire_time, 'trust_id': None, 'user': {'id': 'testuserid'}} data_ref = self.token_provider_api._persistence.create_token(token_id, data) data_ref.pop('user_id') self.assertDictEqual(data, data_ref) token_id = uuid.uuid4().hex expire_time = timeutils.utcnow() + datetime.timedelta(minutes=1) data = {'id_hash': token_id, 'id': token_id, 'a': 'b', 'expires': expire_time, 'trust_id': None, 'user': {'id': 'testuserid'}} data_ref = self.token_provider_api._persistence.create_token(token_id, data) data_ref.pop('user_id') self.assertDictEqual(data, data_ref) self.token_provider_api._persistence.flush_expired_tokens() tokens = self.token_provider_api._persistence._list_tokens( 'testuserid') self.assertEqual(1, len(tokens)) self.assertIn(token_id, tokens) @unit.skip_if_cache_disabled('token') def test_revocation_list_cache(self): expire_time = timeutils.utcnow() + datetime.timedelta(minutes=10) token_id = uuid.uuid4().hex token_data = {'id_hash': 
token_id, 'id': token_id, 'a': 'b', 'expires': expire_time, 'trust_id': None, 'user': {'id': 'testuserid'}, 'token_data': {'token': { 'audit_ids': [uuid.uuid4().hex]}}} token2_id = uuid.uuid4().hex token2_data = {'id_hash': token2_id, 'id': token2_id, 'a': 'b', 'expires': expire_time, 'trust_id': None, 'user': {'id': 'testuserid'}, 'token_data': {'token': { 'audit_ids': [uuid.uuid4().hex]}}} # Create 2 Tokens. self.token_provider_api._persistence.create_token(token_id, token_data) self.token_provider_api._persistence.create_token(token2_id, token2_data) # Verify the revocation list is empty. self.assertEqual( [], self.token_provider_api._persistence.list_revoked_tokens()) self.assertEqual([], self.token_provider_api.list_revoked_tokens()) # Delete a token directly, bypassing the manager. self.token_provider_api._persistence.driver.delete_token(token_id) # Verify the revocation list is still empty. self.assertEqual( [], self.token_provider_api._persistence.list_revoked_tokens()) self.assertEqual([], self.token_provider_api.list_revoked_tokens()) # Invalidate the revocation list. self.token_provider_api._persistence.invalidate_revocation_list() # Verify the deleted token is in the revocation list. revoked_ids = [x['id'] for x in self.token_provider_api.list_revoked_tokens()] self._assert_revoked_token_list_matches_token_persistence(revoked_ids) self.assertIn(token_id, revoked_ids) # Delete the second token, through the manager self.token_provider_api._persistence.delete_token(token2_id) revoked_ids = [x['id'] for x in self.token_provider_api.list_revoked_tokens()] self._assert_revoked_token_list_matches_token_persistence(revoked_ids) # Verify both tokens are in the revocation list. 
self.assertIn(token_id, revoked_ids) self.assertIn(token2_id, revoked_ids) def _test_predictable_revoked_pki_token_id(self, hash_fn): token_id = self._create_token_id() token_id_hash = hash_fn(token_id.encode('utf-8')).hexdigest() token = {'user': {'id': uuid.uuid4().hex}, 'token_data': {'token': {'audit_ids': [uuid.uuid4().hex]}}} self.token_provider_api._persistence.create_token(token_id, token) self.token_provider_api._persistence.delete_token(token_id) revoked_ids = [x['id'] for x in self.token_provider_api.list_revoked_tokens()] self._assert_revoked_token_list_matches_token_persistence(revoked_ids) self.assertIn(token_id_hash, revoked_ids) self.assertNotIn(token_id, revoked_ids) for t in self.token_provider_api._persistence.list_revoked_tokens(): self.assertIn('expires', t) def test_predictable_revoked_pki_token_id_default(self): self._test_predictable_revoked_pki_token_id(hashlib.md5) def test_predictable_revoked_pki_token_id_sha256(self): self.config_fixture.config(group='token', hash_algorithm='sha256') self._test_predictable_revoked_pki_token_id(hashlib.sha256) def test_predictable_revoked_uuid_token_id(self): token_id = uuid.uuid4().hex token = {'user': {'id': uuid.uuid4().hex}, 'token_data': {'token': {'audit_ids': [uuid.uuid4().hex]}}} self.token_provider_api._persistence.create_token(token_id, token) self.token_provider_api._persistence.delete_token(token_id) revoked_tokens = self.token_provider_api.list_revoked_tokens() revoked_ids = [x['id'] for x in revoked_tokens] self._assert_revoked_token_list_matches_token_persistence(revoked_ids) self.assertIn(token_id, revoked_ids) for t in revoked_tokens: self.assertIn('expires', t) def test_create_unicode_token_id(self): token_id = six.text_type(self._create_token_id()) self.create_token_sample_data(token_id=token_id) self.token_provider_api._persistence.get_token(token_id) def test_create_unicode_user_id(self): user_id = six.text_type(uuid.uuid4().hex) token_id, data = 
self.create_token_sample_data(user_id=user_id) self.token_provider_api._persistence.get_token(token_id) def test_token_expire_timezone(self): @test_utils.timezone def _create_token(expire_time): token_id = uuid.uuid4().hex user_id = six.text_type(uuid.uuid4().hex) return self.create_token_sample_data(token_id=token_id, user_id=user_id, expires=expire_time) for d in ['+0', '-11', '-8', '-5', '+5', '+8', '+14']: test_utils.TZ = 'UTC' + d expire_time = timeutils.utcnow() + datetime.timedelta(minutes=1) token_id, data_in = _create_token(expire_time) data_get = self.token_provider_api._persistence.get_token(token_id) self.assertEqual(data_in['id'], data_get['id'], 'TZ=%s' % test_utils.TZ) expire_time_expired = ( timeutils.utcnow() + datetime.timedelta(minutes=-1)) token_id, data_in = _create_token(expire_time_expired) self.assertRaises(exception.TokenNotFound, self.token_provider_api._persistence.get_token, data_in['id']) class TokenCacheInvalidation(object): def _create_test_data(self): self.user = unit.new_user_ref( domain_id=CONF.identity.default_domain_id) self.tenant = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) # Create an equivalent of a scoped token token_dict = {'user': self.user, 'tenant': self.tenant, 'metadata': {}, 'id': 'placeholder'} token_id, data = self.token_provider_api.issue_v2_token(token_dict) self.scoped_token_id = token_id # ..and an un-scoped one token_dict = {'user': self.user, 'tenant': None, 'metadata': {}, 'id': 'placeholder'} token_id, data = self.token_provider_api.issue_v2_token(token_dict) self.unscoped_token_id = token_id # Validate them, in the various ways possible - this will load the # responses into the token cache. 
self._check_scoped_tokens_are_valid() self._check_unscoped_tokens_are_valid() def _check_unscoped_tokens_are_invalid(self): self.assertRaises( exception.TokenNotFound, self.token_provider_api.validate_token, self.unscoped_token_id) self.assertRaises( exception.TokenNotFound, self.token_provider_api.validate_v2_token, self.unscoped_token_id) def _check_scoped_tokens_are_invalid(self): self.assertRaises( exception.TokenNotFound, self.token_provider_api.validate_token, self.scoped_token_id) self.assertRaises( exception.TokenNotFound, self.token_provider_api.validate_token, self.scoped_token_id, self.tenant['id']) self.assertRaises( exception.TokenNotFound, self.token_provider_api.validate_v2_token, self.scoped_token_id) self.assertRaises( exception.TokenNotFound, self.token_provider_api.validate_v2_token, self.scoped_token_id, self.tenant['id']) def _check_scoped_tokens_are_valid(self): self.token_provider_api.validate_token(self.scoped_token_id) self.token_provider_api.validate_token( self.scoped_token_id, belongs_to=self.tenant['id']) self.token_provider_api.validate_v2_token(self.scoped_token_id) self.token_provider_api.validate_v2_token( self.scoped_token_id, belongs_to=self.tenant['id']) def _check_unscoped_tokens_are_valid(self): self.token_provider_api.validate_token(self.unscoped_token_id) self.token_provider_api.validate_v2_token(self.unscoped_token_id) def test_delete_unscoped_token(self): self.token_provider_api._persistence.delete_token( self.unscoped_token_id) self._check_unscoped_tokens_are_invalid() self._check_scoped_tokens_are_valid() def test_delete_scoped_token_by_id(self): self.token_provider_api._persistence.delete_token(self.scoped_token_id) self._check_scoped_tokens_are_invalid() self._check_unscoped_tokens_are_valid() def test_delete_scoped_token_by_user(self): self.token_provider_api._persistence.delete_tokens(self.user['id']) # Since we are deleting all tokens for this user, they should all # now be invalid. 
self._check_scoped_tokens_are_invalid() self._check_unscoped_tokens_are_invalid() def test_delete_scoped_token_by_user_and_tenant(self): self.token_provider_api._persistence.delete_tokens( self.user['id'], tenant_id=self.tenant['id']) self._check_scoped_tokens_are_invalid() self._check_unscoped_tokens_are_valid() keystone-9.0.0/keystone/tests/unit/token/test_pki_provider.py0000664000567000056710000000177312701407102025734 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.tests import unit from keystone.token.providers import pki class TestPkiTokenProvider(unit.TestCase): def setUp(self): super(TestPkiTokenProvider, self).setUp() self.provider = pki.Provider() def test_supports_bind_authentication_returns_true(self): self.assertTrue(self.provider._supports_bind_authentication) def test_need_persistence_return_true(self): self.assertIs(True, self.provider.needs_persistence()) keystone-9.0.0/keystone/tests/unit/token/test_provider.py0000664000567000056710000000210012701407102025052 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from six.moves import urllib from keystone.tests import unit from keystone.token import provider class TestRandomStrings(unit.BaseTestCase): def test_strings_are_url_safe(self): s = provider.random_urlsafe_str() self.assertEqual(s, urllib.parse.quote_plus(s)) def test_strings_can_be_converted_to_bytes(self): s = provider.random_urlsafe_str() self.assertIsInstance(s, six.text_type) b = provider.random_urlsafe_str_to_bytes(s) self.assertIsInstance(b, six.binary_type) keystone-9.0.0/keystone/tests/unit/test_backend_rules.py0000664000567000056710000000454612701407102024721 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception from keystone.tests import unit from keystone.tests.unit.policy import test_backends as policy_tests class RulesPolicy(unit.TestCase, policy_tests.PolicyTests): def setUp(self): super(RulesPolicy, self).setUp() self.load_backends() def config_overrides(self): super(RulesPolicy, self).config_overrides() self.config_fixture.config(group='policy', driver='rules') def test_create(self): self.assertRaises(exception.NotImplemented, super(RulesPolicy, self).test_create) def test_get(self): self.assertRaises(exception.NotImplemented, super(RulesPolicy, self).test_get) def test_list(self): self.assertRaises(exception.NotImplemented, super(RulesPolicy, self).test_list) def test_update(self): self.assertRaises(exception.NotImplemented, super(RulesPolicy, self).test_update) def test_delete(self): self.assertRaises(exception.NotImplemented, super(RulesPolicy, self).test_delete) def test_get_policy_returns_not_found(self): self.assertRaises(exception.NotImplemented, super(RulesPolicy, self).test_get_policy_returns_not_found) def test_update_policy_returns_not_found(self): self.assertRaises(exception.NotImplemented, super(RulesPolicy, self).test_update_policy_returns_not_found) def test_delete_policy_returns_not_found(self): self.assertRaises(exception.NotImplemented, super(RulesPolicy, self).test_delete_policy_returns_not_found) keystone-9.0.0/keystone/tests/unit/default_catalog.templates0000664000567000056710000000145312701407102025537 0ustar jenkinsjenkins00000000000000# config for templated.Catalog, using camelCase because I don't want to do # translations for keystone compat catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0 catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0 catalog.RegionOne.identity.internalURL = http://localhost:$(admin_port)s/v2.0 catalog.RegionOne.identity.name = 'Identity Service' catalog.RegionOne.identity.id = 1 # fake compute service for now to help novaclient tests 
work catalog.RegionOne.compute.publicURL = http://localhost:8774/v1.1/$(tenant_id)s catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s catalog.RegionOne.compute.name = 'Compute Service' catalog.RegionOne.compute.id = 2 keystone-9.0.0/keystone/tests/unit/default_fixtures.py0000664000567000056710000000751212701407102024432 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(dolph): please try to avoid additional fixtures if possible; test suite # performance may be negatively affected. 
DEFAULT_DOMAIN_ID = 'default' TENANTS = [ { 'id': 'bar', 'name': 'BAR', 'domain_id': DEFAULT_DOMAIN_ID, 'description': 'description', 'enabled': True, 'parent_id': DEFAULT_DOMAIN_ID, 'is_domain': False, }, { 'id': 'baz', 'name': 'BAZ', 'domain_id': DEFAULT_DOMAIN_ID, 'description': 'description', 'enabled': True, 'parent_id': DEFAULT_DOMAIN_ID, 'is_domain': False, }, { 'id': 'mtu', 'name': 'MTU', 'description': 'description', 'enabled': True, 'domain_id': DEFAULT_DOMAIN_ID, 'parent_id': DEFAULT_DOMAIN_ID, 'is_domain': False, }, { 'id': 'service', 'name': 'service', 'description': 'description', 'enabled': True, 'domain_id': DEFAULT_DOMAIN_ID, 'parent_id': DEFAULT_DOMAIN_ID, 'is_domain': False, } ] # NOTE(ja): a role of keystone_admin is done in setUp USERS = [ # NOTE(morganfainberg): Admin user for replacing admin_token_auth { 'id': 'reqadmin', 'name': 'REQ_ADMIN', 'domain_id': DEFAULT_DOMAIN_ID, 'password': 'password', 'tenants': [], 'enabled': True }, { 'id': 'foo', 'name': 'FOO', 'domain_id': DEFAULT_DOMAIN_ID, 'password': 'foo2', 'tenants': ['bar'], 'enabled': True, 'email': 'foo@bar.com', }, { 'id': 'two', 'name': 'TWO', 'domain_id': DEFAULT_DOMAIN_ID, 'password': 'two2', 'enabled': True, 'default_project_id': 'baz', 'tenants': ['baz'], 'email': 'two@three.com', }, { 'id': 'badguy', 'name': 'BadGuy', 'domain_id': DEFAULT_DOMAIN_ID, 'password': 'bad', 'enabled': False, 'default_project_id': 'baz', 'tenants': ['baz'], 'email': 'bad@guy.com', }, { 'id': 'sna', 'name': 'SNA', 'domain_id': DEFAULT_DOMAIN_ID, 'password': 'snafu', 'enabled': True, 'tenants': ['bar'], 'email': 'sna@snl.coom', } ] ROLES = [ { 'id': 'admin', 'name': 'admin', 'domain_id': None, }, { 'id': 'member', 'name': 'Member', 'domain_id': None, }, { 'id': '9fe2ff9ee4384b1894a90878d3e92bab', 'name': '_member_', 'domain_id': None, }, { 'id': 'other', 'name': 'Other', 'domain_id': None, }, { 'id': 'browser', 'name': 'Browser', 'domain_id': None, }, { 'id': 'writer', 'name': 'Writer', 'domain_id': 
None, }, { 'id': 'service', 'name': 'Service', 'domain_id': None, } ] # NOTE(morganfainberg): Admin assignment for replacing admin_token_auth ROLE_ASSIGNMENTS = [ { 'user': 'reqadmin', 'tenant_id': 'service', 'role_id': 'admin' }, ] DOMAINS = [{'description': (u'The default domain'), 'enabled': True, 'id': DEFAULT_DOMAIN_ID, 'name': u'Default'}] keystone-9.0.0/keystone/tests/unit/core.py0000664000567000056710000007545412701407105022022 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import import atexit import base64 import datetime import functools import hashlib import json import logging import os import re import shutil import socket import sys import uuid import warnings import fixtures from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_context import context as oslo_context from oslo_context import fixture as oslo_ctx_fixture from oslo_log import fixture as log_fixture from oslo_log import log from oslo_utils import timeutils from oslotest import mockpatch from paste.deploy import loadwsgi import six from sqlalchemy import exc import testtools from testtools import testcase # NOTE(ayoung) # environment.use_eventlet must run before any of the code that will # call the eventlet monkeypatching. 
from keystone.common import environment # noqa environment.use_eventlet() from keystone import auth from keystone.common import config from keystone.common import dependency from keystone.common.kvs import core as kvs_core from keystone.common import sql from keystone import exception from keystone import notifications from keystone.server import common from keystone.tests.unit import ksfixtures from keystone.version import controllers from keystone.version import service config.configure() PID = six.text_type(os.getpid()) TESTSDIR = os.path.dirname(os.path.abspath(__file__)) TESTCONF = os.path.join(TESTSDIR, 'config_files') ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..', '..')) VENDOR = os.path.join(ROOTDIR, 'vendor') ETCDIR = os.path.join(ROOTDIR, 'etc') def _calc_tmpdir(): env_val = os.environ.get('KEYSTONE_TEST_TEMP_DIR') if not env_val: return os.path.join(TESTSDIR, 'tmp', PID) return os.path.join(env_val, PID) TMPDIR = _calc_tmpdir() CONF = cfg.CONF log.register_options(CONF) IN_MEM_DB_CONN_STRING = 'sqlite://' TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' exception._FATAL_EXCEPTION_FORMAT_ERRORS = True os.makedirs(TMPDIR) atexit.register(shutil.rmtree, TMPDIR) class dirs(object): @staticmethod def root(*p): return os.path.join(ROOTDIR, *p) @staticmethod def etc(*p): return os.path.join(ETCDIR, *p) @staticmethod def tests(*p): return os.path.join(TESTSDIR, *p) @staticmethod def tmp(*p): return os.path.join(TMPDIR, *p) @staticmethod def tests_conf(*p): return os.path.join(TESTCONF, *p) # keystone.common.sql.initialize() for testing. 
DEFAULT_TEST_DB_FILE = dirs.tmp('test.db') class EggLoader(loadwsgi.EggLoader): _basket = {} def find_egg_entry_point(self, object_type, name=None): egg_key = '%s:%s' % (object_type, name) egg_ep = self._basket.get(egg_key) if not egg_ep: egg_ep = super(EggLoader, self).find_egg_entry_point( object_type, name=name) self._basket[egg_key] = egg_ep return egg_ep # NOTE(dstanek): class paths were remove from the keystone-paste.ini in # favor of using entry points. This caused tests to slow to a crawl # since we reload the application object for each RESTful test. This # monkey-patching adds caching to paste deploy's egg lookup. loadwsgi.EggLoader = EggLoader @atexit.register def remove_test_databases(): db = dirs.tmp('test.db') if os.path.exists(db): os.unlink(db) pristine = dirs.tmp('test.db.pristine') if os.path.exists(pristine): os.unlink(pristine) def generate_paste_config(extension_name): # Generate a file, based on keystone-paste.ini, that is named: # extension_name.ini, and includes extension_name in the pipeline with open(dirs.etc('keystone-paste.ini'), 'r') as f: contents = f.read() new_contents = contents.replace(' service_v3', ' %s service_v3' % (extension_name)) new_paste_file = dirs.tmp(extension_name + '.ini') with open(new_paste_file, 'w') as f: f.write(new_contents) return new_paste_file def remove_generated_paste_config(extension_name): # Remove the generated paste config file, named extension_name.ini paste_file_to_remove = dirs.tmp(extension_name + '.ini') os.remove(paste_file_to_remove) def skip_if_cache_disabled(*sections): """This decorator is used to skip a test if caching is disabled. Caching can be disabled either globally or for a specific section. In the code fragment:: @skip_if_cache_is_disabled('assignment', 'token') def test_method(*args): ... 
The method test_method would be skipped if caching is disabled globally via the `enabled` option in the `cache` section of the configuration or if the `caching` option is set to false in either `assignment` or `token` sections of the configuration. This decorator can be used with no arguments to only check global caching. If a specified configuration section does not define the `caching` option, this decorator makes the same assumption as the `should_cache_fn` in keystone.common.cache that caching should be enabled. """ def wrapper(f): @functools.wraps(f) def inner(*args, **kwargs): if not CONF.cache.enabled: raise testcase.TestSkipped('Cache globally disabled.') for s in sections: conf_sec = getattr(CONF, s, None) if conf_sec is not None: if not getattr(conf_sec, 'caching', True): raise testcase.TestSkipped('%s caching disabled.' % s) return f(*args, **kwargs) return inner return wrapper def skip_if_cache_is_enabled(*sections): def wrapper(f): @functools.wraps(f) def inner(*args, **kwargs): if CONF.cache.enabled: for s in sections: conf_sec = getattr(CONF, s, None) if conf_sec is not None: if getattr(conf_sec, 'caching', True): raise testcase.TestSkipped('%s caching enabled.' 
% s) return f(*args, **kwargs) return inner return wrapper def skip_if_no_multiple_domains_support(f): """Decorator to skip tests for identity drivers limited to one domain.""" @functools.wraps(f) def wrapper(*args, **kwargs): test_obj = args[0] if not test_obj.identity_api.multiple_domains_supported: raise testcase.TestSkipped('No multiple domains support') return f(*args, **kwargs) return wrapper class UnexpectedExit(Exception): pass def new_region_ref(parent_region_id=None, **kwargs): ref = { 'id': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'parent_region_id': parent_region_id} ref.update(kwargs) return ref def new_service_ref(**kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True, 'type': uuid.uuid4().hex, } ref.update(kwargs) return ref NEEDS_REGION_ID = object() def new_endpoint_ref(service_id, interface='public', region_id=NEEDS_REGION_ID, **kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'interface': interface, 'service_id': service_id, 'url': 'https://' + uuid.uuid4().hex + '.com', } if region_id is NEEDS_REGION_ID: ref['region_id'] = uuid.uuid4().hex elif region_id is None and kwargs.get('region') is not None: # pre-3.2 form endpoints are not supported by this function raise NotImplementedError("use new_endpoint_ref_with_region") else: ref['region_id'] = region_id ref.update(kwargs) return ref def new_endpoint_ref_with_region(service_id, region, interface='public', **kwargs): """Define an endpoint_ref having a pre-3.2 form. Contains the deprecated 'region' instead of 'region_id'. 
""" ref = new_endpoint_ref(service_id, interface, region=region, region_id='invalid', **kwargs) del ref['region_id'] return ref def new_domain_ref(**kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True } ref.update(kwargs) return ref def new_project_ref(domain_id=None, is_domain=False, **kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True, 'domain_id': domain_id, 'is_domain': is_domain, } # NOTE(henry-nash): We don't include parent_id in the initial list above # since specifying it is optional depending on where the project sits in # the hierarchy (and a parent_id of None has meaning - i.e. it's a top # level project). ref.update(kwargs) return ref def new_user_ref(domain_id, project_id=None, **kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'enabled': True, 'domain_id': domain_id, 'email': uuid.uuid4().hex, 'password': uuid.uuid4().hex, } if project_id: ref['default_project_id'] = project_id ref.update(kwargs) return ref def new_federated_user_ref(idp_id=None, protocol_id=None, **kwargs): ref = { 'idp_id': idp_id or 'ORG_IDP', 'protocol_id': protocol_id or 'saml2', 'unique_id': uuid.uuid4().hex, 'display_name': uuid.uuid4().hex, } ref.update(kwargs) return ref def new_group_ref(domain_id, **kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'domain_id': domain_id } ref.update(kwargs) return ref def new_credential_ref(user_id, project_id=None, type='cert', **kwargs): ref = { 'id': uuid.uuid4().hex, 'user_id': user_id, 'type': type, } if project_id: ref['project_id'] = project_id if 'blob' not in kwargs: ref['blob'] = uuid.uuid4().hex ref.update(kwargs) return ref def new_cert_credential(user_id, project_id=None, blob=None, **kwargs): if blob is None: blob = {'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex} credential = new_credential_ref(user_id=user_id, 
project_id=project_id, blob=json.dumps(blob), type='cert', **kwargs) return blob, credential def new_ec2_credential(user_id, project_id=None, blob=None, **kwargs): if blob is None: blob = { 'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex, 'trust_id': None } if 'id' not in kwargs: access = blob['access'].encode('utf-8') kwargs['id'] = hashlib.sha256(access).hexdigest() credential = new_credential_ref(user_id=user_id, project_id=project_id, blob=json.dumps(blob), type='ec2', **kwargs) return blob, credential def new_totp_credential(user_id, project_id=None, blob=None): if not blob: blob = base64.b32encode(uuid.uuid4().hex).rstrip('=') credential = new_credential_ref(user_id=user_id, project_id=project_id, blob=blob, type='totp') return credential def new_role_ref(**kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'domain_id': None } ref.update(kwargs) return ref def new_policy_ref(**kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True, # Store serialized JSON data as the blob to mimic real world usage. 
'blob': json.dumps({'data': uuid.uuid4().hex, }), 'type': uuid.uuid4().hex, } ref.update(kwargs) return ref def new_trust_ref(trustor_user_id, trustee_user_id, project_id=None, impersonation=None, expires=None, role_ids=None, role_names=None, remaining_uses=None, allow_redelegation=False, redelegation_count=None, **kwargs): ref = { 'id': uuid.uuid4().hex, 'trustor_user_id': trustor_user_id, 'trustee_user_id': trustee_user_id, 'impersonation': impersonation or False, 'project_id': project_id, 'remaining_uses': remaining_uses, 'allow_redelegation': allow_redelegation, } if isinstance(redelegation_count, int): ref.update(redelegation_count=redelegation_count) if isinstance(expires, six.string_types): ref['expires_at'] = expires elif isinstance(expires, dict): ref['expires_at'] = ( timeutils.utcnow() + datetime.timedelta(**expires) ).strftime(TIME_FORMAT) elif expires is None: pass else: raise NotImplementedError('Unexpected value for "expires"') role_ids = role_ids or [] role_names = role_names or [] if role_ids or role_names: ref['roles'] = [] for role_id in role_ids: ref['roles'].append({'id': role_id}) for role_name in role_names: ref['roles'].append({'name': role_name}) ref.update(kwargs) return ref def create_user(api, domain_id, **kwargs): """Create a user via the API. Keep the created password. The password is saved and restored when api.create_user() is called. Only use this routine if there is a requirement for the user object to have a valid password after api.create_user() is called. """ user = new_user_ref(domain_id=domain_id, **kwargs) password = user['password'] user = api.create_user(user) user['password'] = password return user class BaseTestCase(testtools.TestCase): """Light weight base test class. This is a placeholder that will eventually go away once the setup/teardown in TestCase is properly trimmed down to the bare essentials. This is really just a play to speed up the tests by eliminating unnecessary work. 
""" def setUp(self): super(BaseTestCase, self).setUp() self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) self.useFixture(mockpatch.PatchObject(sys, 'exit', side_effect=UnexpectedExit)) self.useFixture(log_fixture.get_logging_handle_error_fixture()) warnings.filterwarnings('error', category=DeprecationWarning, module='^keystone\\.') warnings.simplefilter('error', exc.SAWarning) self.addCleanup(warnings.resetwarnings) # Ensure we have an empty threadlocal context at the start of each # test. self.assertIsNone(oslo_context.get_current()) self.useFixture(oslo_ctx_fixture.ClearRequestContext()) def cleanup_instance(self, *names): """Create a function suitable for use with self.addCleanup. :returns: a callable that uses a closure to delete instance attributes """ def cleanup(): for name in names: # TODO(dstanek): remove this 'if' statement once # load_backend in test_backend_ldap is only called once # per test if hasattr(self, name): delattr(self, name) return cleanup class TestCase(BaseTestCase): def config_files(self): return [] def _policy_fixture(self): return ksfixtures.Policy(dirs.etc('policy.json'), self.config_fixture) def config_overrides(self): # NOTE(morganfainberg): enforce config_overrides can only ever be # called a single time. assert self.__config_overrides_called is False self.__config_overrides_called = True signing_certfile = 'examples/pki/certs/signing_cert.pem' signing_keyfile = 'examples/pki/private/signing_key.pem' self.useFixture(self._policy_fixture()) self.config_fixture.config( # TODO(morganfainberg): Make Cache Testing a separate test case # in tempest, and move it out of the base unit tests. group='cache', backend='dogpile.cache.memory', enabled=True, proxies=['oslo_cache.testing.CacheIsolatingProxy']) self.config_fixture.config( group='catalog', driver='sql', template_file=dirs.tests('default_catalog.templates')) self.config_fixture.config( group='kvs', backends=[ ('keystone.tests.unit.test_kvs.' 
'KVSBackendForcedKeyMangleFixture'), 'keystone.tests.unit.test_kvs.KVSBackendFixture']) self.config_fixture.config( group='signing', certfile=signing_certfile, keyfile=signing_keyfile, ca_certs='examples/pki/certs/cacert.pem') self.config_fixture.config(group='token', driver='kvs') self.config_fixture.config( group='saml', certfile=signing_certfile, keyfile=signing_keyfile) self.config_fixture.config( default_log_levels=[ 'amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'routes.middleware=INFO', 'stevedore.extension=INFO', 'keystone.notifications=INFO', 'keystone.common.ldap=INFO', ]) self.auth_plugin_config_override() def auth_plugin_config_override(self, methods=None, **method_classes): self.useFixture( ksfixtures.ConfigAuthPlugins(self.config_fixture, methods, **method_classes)) def _assert_config_overrides_called(self): assert self.__config_overrides_called is True def setUp(self): super(TestCase, self).setUp() self.__config_overrides_called = False self.__load_backends_called = False self.addCleanup(CONF.reset) self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.addCleanup(delattr, self, 'config_fixture') self.config(self.config_files()) # NOTE(morganfainberg): mock the auth plugin setup to use the config # fixture which automatically unregisters options when performing # cleanup. def mocked_register_auth_plugin_opt(conf, opt): self.config_fixture.register_opt(opt, group='auth') self.useFixture(mockpatch.PatchObject( config, '_register_auth_plugin_opt', new=mocked_register_auth_plugin_opt)) self.sql_driver_version_overrides = {} self.config_overrides() # NOTE(morganfainberg): ensure config_overrides has been called. self.addCleanup(self._assert_config_overrides_called) self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) # NOTE(morganfainberg): This code is a copy from the oslo-incubator # log module. 
This is not in a function or otherwise available to use # without having a CONF object to setup logging. This should help to # reduce the log size by limiting what we log (similar to how Keystone # would run under mod_wsgi or eventlet). for pair in CONF.default_log_levels: mod, _sep, level_name = pair.partition('=') logger = logging.getLogger(mod) logger.setLevel(level_name) self.useFixture(ksfixtures.Cache()) # Clear the registry of providers so that providers from previous # tests aren't used. self.addCleanup(dependency.reset) # Ensure Notification subscriptions and resource types are empty self.addCleanup(notifications.clear_subscribers) self.addCleanup(notifications.reset_notifier) # Reset the auth-plugin registry self.addCleanup(self.clear_auth_plugin_registry) self.addCleanup(setattr, controllers, '_VERSIONS', []) def config(self, config_files): sql.initialize() CONF(args=[], project='keystone', default_config_files=config_files) def load_backends(self): """Initializes each manager and assigns them to an attribute.""" # TODO(blk-u): Shouldn't need to clear the registry here, but some # tests call load_backends multiple times. These should be fixed to # only call load_backends once. dependency.reset() # TODO(morganfainberg): Shouldn't need to clear the registry here, but # some tests call load_backends multiple times. Since it is not # possible to re-configure a backend, we need to clear the list. This # should eventually be removed once testing has been cleaned up. kvs_core.KEY_VALUE_STORE_REGISTRY.clear() self.clear_auth_plugin_registry() drivers, _unused = common.setup_backends( load_extra_backends_fn=self.load_extra_backends) for manager_name, manager in drivers.items(): setattr(self, manager_name, manager) self.addCleanup(self.cleanup_instance(*list(drivers.keys()))) def load_extra_backends(self): """Override to load managers that aren't loaded by default. This is useful to load managers initialized by extensions. No extra backends are loaded by default. 
:returns: dict of name -> manager """ return {} def load_fixtures(self, fixtures): """Hacky basic and naive fixture loading based on a python module. Expects that the various APIs into the various services are already defined on `self`. """ # NOTE(dstanek): create a list of attribute names to be removed # from this instance during cleanup fixtures_to_cleanup = [] # TODO(termie): doing something from json, probably based on Django's # loaddata will be much preferred. if (hasattr(self, 'identity_api') and hasattr(self, 'assignment_api') and hasattr(self, 'resource_api')): for domain in fixtures.DOMAINS: try: rv = self.resource_api.create_domain(domain['id'], domain) except exception.Conflict: rv = self.resource_api.get_domain(domain['id']) except exception.NotImplemented: rv = domain attrname = 'domain_%s' % domain['id'] setattr(self, attrname, rv) fixtures_to_cleanup.append(attrname) for tenant in fixtures.TENANTS: if hasattr(self, 'tenant_%s' % tenant['id']): try: # This will clear out any roles on the project as well self.resource_api.delete_project(tenant['id']) except exception.ProjectNotFound: pass rv = self.resource_api.create_project( tenant['id'], tenant) attrname = 'tenant_%s' % tenant['id'] setattr(self, attrname, rv) fixtures_to_cleanup.append(attrname) for role in fixtures.ROLES: try: rv = self.role_api.create_role(role['id'], role) except exception.Conflict: rv = self.role_api.get_role(role['id']) attrname = 'role_%s' % role['id'] setattr(self, attrname, rv) fixtures_to_cleanup.append(attrname) for user in fixtures.USERS: user_copy = user.copy() tenants = user_copy.pop('tenants') try: existing_user = getattr(self, 'user_%s' % user['id'], None) if existing_user is not None: self.identity_api.delete_user(existing_user['id']) except exception.UserNotFound: pass # For users, the manager layer will generate the ID user_copy = self.identity_api.create_user(user_copy) # Our tests expect that the password is still in the user # record so that they can reference 
it, so put it back into # the dict returned. user_copy['password'] = user['password'] for tenant_id in tenants: try: self.assignment_api.add_user_to_project( tenant_id, user_copy['id']) except exception.Conflict: pass # Use the ID from the fixture as the attribute name, so # that our tests can easily reference each user dict, while # the ID in the dict will be the real public ID. attrname = 'user_%s' % user['id'] setattr(self, attrname, user_copy) fixtures_to_cleanup.append(attrname) for role_assignment in fixtures.ROLE_ASSIGNMENTS: role_id = role_assignment['role_id'] user = role_assignment['user'] tenant_id = role_assignment['tenant_id'] user_id = getattr(self, 'user_%s' % user)['id'] try: self.assignment_api.add_role_to_user_and_project( user_id, tenant_id, role_id) except exception.Conflict: pass self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup)) def _paste_config(self, config): if not config.startswith('config:'): test_path = os.path.join(TESTSDIR, config) etc_path = os.path.join(ROOTDIR, 'etc', config) for path in [test_path, etc_path]: if os.path.exists('%s-paste.ini' % path): return 'config:%s-paste.ini' % path return config def loadapp(self, config, name='main'): return service.loadapp(self._paste_config(config), name=name) def clear_auth_plugin_registry(self): auth.controllers.AUTH_METHODS.clear() auth.controllers.AUTH_PLUGINS_LOADED = False def assertCloseEnoughForGovernmentWork(self, a, b, delta=3): """Asserts that two datetimes are nearly equal within a small delta. :param delta: Maximum allowable time delta, defined in seconds. """ if a == b: # Short-circuit if the values are the same. 
return msg = '%s != %s within %s delta' % (a, b, delta) self.assertTrue(abs(a - b).seconds <= delta, msg) def assertNotEmpty(self, l): self.assertTrue(len(l)) def assertRaisesRegexp(self, expected_exception, expected_regexp, callable_obj, *args, **kwargs): """Asserts that the message in a raised exception matches a regexp.""" try: callable_obj(*args, **kwargs) except expected_exception as exc_value: if isinstance(expected_regexp, six.string_types): expected_regexp = re.compile(expected_regexp) if isinstance(exc_value.args[0], six.text_type): if not expected_regexp.search(six.text_type(exc_value)): raise self.failureException( '"%s" does not match "%s"' % (expected_regexp.pattern, six.text_type(exc_value))) else: if not expected_regexp.search(str(exc_value)): raise self.failureException( '"%s" does not match "%s"' % (expected_regexp.pattern, str(exc_value))) else: if hasattr(expected_exception, '__name__'): excName = expected_exception.__name__ else: excName = str(expected_exception) raise self.failureException("%s not raised" % excName) @property def ipv6_enabled(self): if socket.has_ipv6: sock = None try: sock = socket.socket(socket.AF_INET6) # NOTE(Mouad): Try to bind to IPv6 loopback ip address. sock.bind(("::1", 0)) return True except socket.error: pass finally: if sock: sock.close() return False def skip_if_no_ipv6(self): if not self.ipv6_enabled: raise self.skipTest("IPv6 is not enabled in the system") def skip_if_env_not_set(self, env_var): if not os.environ.get(env_var): self.skipTest('Env variable %s is not set.' 
% env_var) class SQLDriverOverrides(object): """A mixin for consolidating sql-specific test overrides.""" def config_overrides(self): super(SQLDriverOverrides, self).config_overrides() # SQL specific driver overrides self.config_fixture.config(group='catalog', driver='sql') self.config_fixture.config(group='identity', driver='sql') self.config_fixture.config(group='policy', driver='sql') self.config_fixture.config(group='token', driver='sql') self.config_fixture.config(group='trust', driver='sql') def use_specific_sql_driver_version(self, driver_path, versionless_backend, version_suffix): """Add this versioned driver to the list that will be loaded. :param driver_path: The path to the drivers, e.g. 'keystone.assignment' :param versionless_backend: The name of the versionless drivers, e.g. 'backends' :param version_suffix: The suffix for the version , e.g. ``V8_`` This method assumes that versioned drivers are named: , e.g. 'V8_backends'. """ self.sql_driver_version_overrides[driver_path] = { 'versionless_backend': versionless_backend, 'versioned_backend': version_suffix + versionless_backend} keystone-9.0.0/keystone/tests/unit/test_associate_project_endpoint_extension.py0000664000567000056710000016331412701407102031614 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import uuid import mock from oslo_log import versionutils from six.moves import http_client from testtools import matchers from keystone.contrib.endpoint_filter import routers from keystone.tests import unit from keystone.tests.unit import test_v3 class EndpointFilterTestCase(test_v3.RestfulTestCase): def config_overrides(self): super(EndpointFilterTestCase, self).config_overrides() self.config_fixture.config( group='catalog', driver='endpoint_filter.sql') def setUp(self): super(EndpointFilterTestCase, self).setUp() self.default_request_url = ( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': self.endpoint_id}) class EndpointFilterDeprecateTestCase(test_v3.RestfulTestCase): @mock.patch.object(versionutils, 'report_deprecated_feature') def test_exception_happens(self, mock_deprecator): routers.EndpointFilterExtension(mock.ANY) mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY) args, _kwargs = mock_deprecator.call_args self.assertIn("Remove endpoint_filter_extension from", args[1]) class EndpointFilterCRUDTestCase(EndpointFilterTestCase): def test_create_endpoint_project_association(self): """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Valid endpoint and project id test case. """ self.put(self.default_request_url) def test_create_endpoint_project_association_with_invalid_project(self): """PUT OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Invalid project id test case. """ self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id}, expected_status=http_client.NOT_FOUND) def test_create_endpoint_project_association_with_invalid_endpoint(self): """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Invalid endpoint id test case. 
""" self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex}, expected_status=http_client.NOT_FOUND) def test_create_endpoint_project_association_with_unexpected_body(self): """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Unexpected body in request. The body should be ignored. """ self.put(self.default_request_url, body={'project_id': self.default_domain_project_id}) def test_check_endpoint_project_association(self): """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Valid project and endpoint id test case. """ self.put(self.default_request_url) self.head('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': self.endpoint_id}) def test_check_endpoint_project_association_with_invalid_project(self): """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Invalid project id test case. """ self.put(self.default_request_url) self.head('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id}, expected_status=http_client.NOT_FOUND) def test_check_endpoint_project_association_with_invalid_endpoint(self): """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Invalid endpoint id test case. """ self.put(self.default_request_url) self.head('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex}, expected_status=http_client.NOT_FOUND) def test_list_endpoints_associated_with_valid_project(self): """GET /OS-EP-FILTER/projects/{project_id}/endpoints Valid project and endpoint id test case. 
""" self.put(self.default_request_url) resource_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % { 'project_id': self.default_domain_project_id} r = self.get(resource_url) self.assertValidEndpointListResponse(r, self.endpoint, resource_url=resource_url) def test_list_endpoints_associated_with_invalid_project(self): """GET /OS-EP-FILTER/projects/{project_id}/endpoints Invalid project id test case. """ self.put(self.default_request_url) self.get('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % { 'project_id': uuid.uuid4().hex}, expected_status=http_client.NOT_FOUND) def test_list_projects_associated_with_endpoint(self): """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects Valid endpoint-project association test case. """ self.put(self.default_request_url) resource_url = '/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % { 'endpoint_id': self.endpoint_id} r = self.get(resource_url) self.assertValidProjectListResponse(r, self.default_domain_project, resource_url=resource_url) def test_list_projects_with_no_endpoint_project_association(self): """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects Valid endpoint id but no endpoint-project associations test case. """ r = self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {'endpoint_id': self.endpoint_id}) self.assertValidProjectListResponse(r, expected_length=0) def test_list_projects_associated_with_invalid_endpoint(self): """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects Invalid endpoint id test case. """ self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {'endpoint_id': uuid.uuid4().hex}, expected_status=http_client.NOT_FOUND) def test_remove_endpoint_project_association(self): """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Valid project id and endpoint id test case. 
""" self.put(self.default_request_url) self.delete('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': self.endpoint_id}) def test_remove_endpoint_project_association_with_invalid_project(self): """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Invalid project id test case. """ self.put(self.default_request_url) self.delete('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id}, expected_status=http_client.NOT_FOUND) def test_remove_endpoint_project_association_with_invalid_endpoint(self): """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Invalid endpoint id test case. """ self.put(self.default_request_url) self.delete('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex}, expected_status=http_client.NOT_FOUND) def test_endpoint_project_association_cleanup_when_project_deleted(self): self.put(self.default_request_url) association_url = ('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {'endpoint_id': self.endpoint_id}) r = self.get(association_url) self.assertValidProjectListResponse(r, expected_length=1) self.delete('/projects/%(project_id)s' % { 'project_id': self.default_domain_project_id}) r = self.get(association_url) self.assertValidProjectListResponse(r, expected_length=0) def test_endpoint_project_association_cleanup_when_endpoint_deleted(self): self.put(self.default_request_url) association_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % { 'project_id': self.default_domain_project_id} r = self.get(association_url) self.assertValidEndpointListResponse(r, expected_length=1) self.delete('/endpoints/%(endpoint_id)s' % { 'endpoint_id': self.endpoint_id}) r = self.get(association_url) self.assertValidEndpointListResponse(r, expected_length=0) 
@unit.skip_if_cache_disabled('catalog') def test_create_endpoint_project_association_invalidates_cache(self): # NOTE(davechen): create another endpoint which will be added to # default project, this should be done at first since # `create_endpoint` will also invalidate cache. endpoint_id2 = uuid.uuid4().hex endpoint2 = unit.new_endpoint_ref(service_id=self.service_id, region_id=self.region_id, interface='public', id=endpoint_id2) self.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy()) # create endpoint project association. self.put(self.default_request_url) # should get back only one endpoint that was just created. user_id = uuid.uuid4().hex catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) # there is only one endpoints associated with the default project. self.assertEqual(1, len(catalog[0]['endpoints'])) self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id']) # add the second endpoint to default project, bypassing # catalog_api API manager. self.catalog_api.driver.add_endpoint_to_project( endpoint_id2, self.default_domain_project_id) # but, we can just get back one endpoint from the cache, since the # catalog is pulled out from cache and its haven't been invalidated. catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) self.assertEqual(1, len(catalog[0]['endpoints'])) # remove the endpoint2 from the default project, and add it again via # catalog_api API manager. self.catalog_api.driver.remove_endpoint_from_project( endpoint_id2, self.default_domain_project_id) # add second endpoint to default project, this can be done by calling # the catalog_api API manager directly but call the REST API # instead for consistency. 
self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': endpoint_id2}) # should get back two endpoints since the cache has been # invalidated when the second endpoint was added to default project. catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) self.assertEqual(2, len(catalog[0]['endpoints'])) ep_id_list = [catalog[0]['endpoints'][0]['id'], catalog[0]['endpoints'][1]['id']] self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list) @unit.skip_if_cache_disabled('catalog') def test_remove_endpoint_from_project_invalidates_cache(self): endpoint_id2 = uuid.uuid4().hex endpoint2 = unit.new_endpoint_ref(service_id=self.service_id, region_id=self.region_id, interface='public', id=endpoint_id2) self.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy()) # create endpoint project association. self.put(self.default_request_url) # add second endpoint to default project. self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': endpoint_id2}) # should get back only one endpoint that was just created. user_id = uuid.uuid4().hex catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) # there are two endpoints associated with the default project. ep_id_list = [catalog[0]['endpoints'][0]['id'], catalog[0]['endpoints'][1]['id']] self.assertEqual(2, len(catalog[0]['endpoints'])) self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list) # remove the endpoint2 from the default project, bypassing # catalog_api API manager. self.catalog_api.driver.remove_endpoint_from_project( endpoint_id2, self.default_domain_project_id) # but, we can just still get back two endpoints from the cache, # since the catalog is pulled out from cache and its haven't # been invalidated. 
catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) self.assertEqual(2, len(catalog[0]['endpoints'])) # add back the endpoint2 to the default project, and remove it by # catalog_api API manage. self.catalog_api.driver.add_endpoint_to_project( endpoint_id2, self.default_domain_project_id) # remove the endpoint2 from the default project, this can be done # by calling the catalog_api API manager directly but call # the REST API instead for consistency. self.delete('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': endpoint_id2}) # should only get back one endpoint since the cache has been # invalidated after the endpoint project association was removed. catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) self.assertEqual(1, len(catalog[0]['endpoints'])) self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id']) class EndpointFilterTokenRequestTestCase(EndpointFilterTestCase): def test_project_scoped_token_using_endpoint_filter(self): """Verify endpoints from project scoped token filtered.""" # create a project to work with ref = unit.new_project_ref(domain_id=self.domain_id) r = self.post('/projects', body={'project': ref}) project = self.assertValidProjectResponse(r, ref) # grant the user a role on the project self.put( '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % { 'user_id': self.user['id'], 'project_id': project['id'], 'role_id': self.role['id']}) # set the user's preferred project body = {'user': {'default_project_id': project['id']}} r = self.patch('/users/%(user_id)s' % { 'user_id': self.user['id']}, body=body) self.assertValidUserResponse(r) # add one endpoint to the project self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': project['id'], 'endpoint_id': self.endpoint_id}) # attempt to authenticate without requesting a project auth_data = 
self.build_authentication_request( user_id=self.user['id'], password=self.user['password']) r = self.post('/auth/tokens', body=auth_data) self.assertValidProjectScopedTokenResponse( r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1) self.assertEqual(project['id'], r.result['token']['project']['id']) def test_default_scoped_token_using_endpoint_filter(self): """Verify endpoints from default scoped token filtered.""" # add one endpoint to default project self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) r = self.post('/auth/tokens', body=auth_data) self.assertValidProjectScopedTokenResponse( r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1) self.assertEqual(self.project['id'], r.result['token']['project']['id']) # Ensure name of the service exists self.assertIn('name', r.result['token']['catalog'][0]) # region and region_id should be the same in endpoints endpoint = r.result['token']['catalog'][0]['endpoints'][0] self.assertIn('region', endpoint) self.assertIn('region_id', endpoint) self.assertEqual(endpoint['region'], endpoint['region_id']) def test_scoped_token_with_no_catalog_using_endpoint_filter(self): """Verify endpoint filter does not affect no catalog.""" self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) r = self.post('/auth/tokens?nocatalog', body=auth_data) self.assertValidProjectScopedTokenResponse( r, require_catalog=False) self.assertEqual(self.project['id'], r.result['token']['project']['id']) def test_invalid_endpoint_project_association(self): """Verify an 
invalid endpoint-project association is handled.""" # add first endpoint to default project self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}) # create a second temporary endpoint endpoint_id2 = uuid.uuid4().hex endpoint2 = unit.new_endpoint_ref(service_id=self.service_id, region_id=self.region_id, interface='public', id=endpoint_id2) self.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy()) # add second endpoint to default project self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.project['id'], 'endpoint_id': endpoint_id2}) # remove the temporary reference # this will create inconsistency in the endpoint filter table # which is fixed during the catalog creation for token request self.catalog_api.delete_endpoint(endpoint_id2) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) r = self.post('/auth/tokens', body=auth_data) self.assertValidProjectScopedTokenResponse( r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1) self.assertEqual(self.project['id'], r.result['token']['project']['id']) def test_disabled_endpoint(self): """Test that a disabled endpoint is handled.""" # Add an enabled endpoint to the default project self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}) # Add a disabled endpoint to the default project. # Create a disabled endpoint that's like the enabled one. 
disabled_endpoint_ref = copy.copy(self.endpoint) disabled_endpoint_id = uuid.uuid4().hex disabled_endpoint_ref.update({ 'id': disabled_endpoint_id, 'enabled': False, 'interface': 'internal' }) self.catalog_api.create_endpoint(disabled_endpoint_id, disabled_endpoint_ref) self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.project['id'], 'endpoint_id': disabled_endpoint_id}) # Authenticate to get token with catalog auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) r = self.post('/auth/tokens', body=auth_data) endpoints = r.result['token']['catalog'][0]['endpoints'] endpoint_ids = [ep['id'] for ep in endpoints] self.assertEqual([self.endpoint_id], endpoint_ids) def test_multiple_endpoint_project_associations(self): def _create_an_endpoint(): endpoint_ref = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id) r = self.post('/endpoints', body={'endpoint': endpoint_ref}) return r.result['endpoint']['id'] # create three endpoints endpoint_id1 = _create_an_endpoint() endpoint_id2 = _create_an_endpoint() _create_an_endpoint() # only associate two endpoints with project self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.project['id'], 'endpoint_id': endpoint_id1}) self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.project['id'], 'endpoint_id': endpoint_id2}) # there should be only two endpoints in token catalog auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) r = self.post('/auth/tokens', body=auth_data) self.assertValidProjectScopedTokenResponse( r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=2) def test_get_auth_catalog_using_endpoint_filter(self): # add one endpoint to default project 
self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id']) token_data = self.post('/auth/tokens', body=auth_data) self.assertValidProjectScopedTokenResponse( token_data, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1) auth_catalog = self.get('/auth/catalog', token=token_data.headers['X-Subject-Token']) self.assertEqual(token_data.result['token']['catalog'], auth_catalog.result['catalog']) class JsonHomeTests(EndpointFilterTestCase, test_v3.JsonHomeTestMixin): JSON_HOME_DATA = { 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/' '1.0/rel/endpoint_projects': { 'href-template': '/OS-EP-FILTER/endpoints/{endpoint_id}/projects', 'href-vars': { 'endpoint_id': 'http://docs.openstack.org/api/openstack-identity/3/param/' 'endpoint_id', }, }, 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/' '1.0/rel/endpoint_groups': { 'href': '/OS-EP-FILTER/endpoint_groups', }, 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/' '1.0/rel/endpoint_group': { 'href-template': '/OS-EP-FILTER/endpoint_groups/' '{endpoint_group_id}', 'href-vars': { 'endpoint_group_id': 'http://docs.openstack.org/api/openstack-identity/3/' 'ext/OS-EP-FILTER/1.0/param/endpoint_group_id', }, }, 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/' '1.0/rel/endpoint_group_to_project_association': { 'href-template': '/OS-EP-FILTER/endpoint_groups/' '{endpoint_group_id}/projects/{project_id}', 'href-vars': { 'project_id': 'http://docs.openstack.org/api/openstack-identity/3/param/' 'project_id', 'endpoint_group_id': 'http://docs.openstack.org/api/openstack-identity/3/' 'ext/OS-EP-FILTER/1.0/param/endpoint_group_id', }, }, 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/' 
'1.0/rel/projects_associated_with_endpoint_group': { 'href-template': '/OS-EP-FILTER/endpoint_groups/' '{endpoint_group_id}/projects', 'href-vars': { 'endpoint_group_id': 'http://docs.openstack.org/api/openstack-identity/3/' 'ext/OS-EP-FILTER/1.0/param/endpoint_group_id', }, }, 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/' '1.0/rel/endpoints_in_endpoint_group': { 'href-template': '/OS-EP-FILTER/endpoint_groups/' '{endpoint_group_id}/endpoints', 'href-vars': { 'endpoint_group_id': 'http://docs.openstack.org/api/openstack-identity/3/' 'ext/OS-EP-FILTER/1.0/param/endpoint_group_id', }, }, 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/' '1.0/rel/project_endpoint_groups': { 'href-template': '/OS-EP-FILTER/projects/{project_id}/' 'endpoint_groups', 'href-vars': { 'project_id': 'http://docs.openstack.org/api/openstack-identity/3/param/' 'project_id', }, }, } class EndpointGroupCRUDTestCase(EndpointFilterTestCase): DEFAULT_ENDPOINT_GROUP_BODY = { 'endpoint_group': { 'description': 'endpoint group description', 'filters': { 'interface': 'admin' }, 'name': 'endpoint_group_name' } } DEFAULT_ENDPOINT_GROUP_URL = '/OS-EP-FILTER/endpoint_groups' def test_create_endpoint_group(self): """POST /OS-EP-FILTER/endpoint_groups Valid endpoint group test case. 
""" r = self.post(self.DEFAULT_ENDPOINT_GROUP_URL, body=self.DEFAULT_ENDPOINT_GROUP_BODY) expected_filters = (self.DEFAULT_ENDPOINT_GROUP_BODY ['endpoint_group']['filters']) expected_name = (self.DEFAULT_ENDPOINT_GROUP_BODY ['endpoint_group']['name']) self.assertEqual(expected_filters, r.result['endpoint_group']['filters']) self.assertEqual(expected_name, r.result['endpoint_group']['name']) self.assertThat( r.result['endpoint_group']['links']['self'], matchers.EndsWith( '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': r.result['endpoint_group']['id']})) def test_create_invalid_endpoint_group(self): """POST /OS-EP-FILTER/endpoint_groups Invalid endpoint group creation test case. """ invalid_body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY) invalid_body['endpoint_group']['filters'] = {'foobar': 'admin'} self.post(self.DEFAULT_ENDPOINT_GROUP_URL, body=invalid_body, expected_status=http_client.BAD_REQUEST) def test_get_endpoint_group(self): """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group} Valid endpoint group test case. """ # create an endpoint group to work with response = self.post(self.DEFAULT_ENDPOINT_GROUP_URL, body=self.DEFAULT_ENDPOINT_GROUP_BODY) endpoint_group_id = response.result['endpoint_group']['id'] endpoint_group_filters = response.result['endpoint_group']['filters'] endpoint_group_name = response.result['endpoint_group']['name'] url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': endpoint_group_id} self.get(url) self.assertEqual(endpoint_group_id, response.result['endpoint_group']['id']) self.assertEqual(endpoint_group_filters, response.result['endpoint_group']['filters']) self.assertEqual(endpoint_group_name, response.result['endpoint_group']['name']) self.assertThat(response.result['endpoint_group']['links']['self'], matchers.EndsWith(url)) def test_get_invalid_endpoint_group(self): """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group} Invalid endpoint group test case. 
""" endpoint_group_id = 'foobar' url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': endpoint_group_id} self.get(url, expected_status=http_client.NOT_FOUND) def test_check_endpoint_group(self): """HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id} Valid endpoint_group_id test case. """ # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': endpoint_group_id} self.head(url, expected_status=http_client.OK) def test_check_invalid_endpoint_group(self): """HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id} Invalid endpoint_group_id test case. """ endpoint_group_id = 'foobar' url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': endpoint_group_id} self.head(url, expected_status=http_client.NOT_FOUND) def test_patch_endpoint_group(self): """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group} Valid endpoint group patch test case. """ body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY) body['endpoint_group']['filters'] = {'region_id': 'UK'} body['endpoint_group']['name'] = 'patch_test' # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': endpoint_group_id} r = self.patch(url, body=body) self.assertEqual(endpoint_group_id, r.result['endpoint_group']['id']) self.assertEqual(body['endpoint_group']['filters'], r.result['endpoint_group']['filters']) self.assertThat(r.result['endpoint_group']['links']['self'], matchers.EndsWith(url)) def test_patch_nonexistent_endpoint_group(self): """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group} Invalid endpoint group patch test case. 
""" body = { 'endpoint_group': { 'name': 'patch_test' } } url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': 'ABC'} self.patch(url, body=body, expected_status=http_client.NOT_FOUND) def test_patch_invalid_endpoint_group(self): """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group} Valid endpoint group patch test case. """ body = { 'endpoint_group': { 'description': 'endpoint group description', 'filters': { 'region': 'UK' }, 'name': 'patch_test' } } # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': endpoint_group_id} self.patch(url, body=body, expected_status=http_client.BAD_REQUEST) # Perform a GET call to ensure that the content remains # the same (as DEFAULT_ENDPOINT_GROUP_BODY) after attempting to update # with an invalid filter url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': endpoint_group_id} r = self.get(url) del r.result['endpoint_group']['id'] del r.result['endpoint_group']['links'] self.assertDictEqual(self.DEFAULT_ENDPOINT_GROUP_BODY, r.result) def test_delete_endpoint_group(self): """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group} Valid endpoint group test case. """ # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': endpoint_group_id} self.delete(url) self.get(url, expected_status=http_client.NOT_FOUND) def test_delete_invalid_endpoint_group(self): """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group} Invalid endpoint group test case. 
""" endpoint_group_id = 'foobar' url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': endpoint_group_id} self.delete(url, expected_status=http_client.NOT_FOUND) def test_add_endpoint_group_to_project(self): """Create a valid endpoint group and project association.""" endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) self._create_endpoint_group_project_association(endpoint_group_id, self.project_id) def test_add_endpoint_group_to_project_with_invalid_project_id(self): """Create an invalid endpoint group and project association.""" # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # associate endpoint group with project project_id = uuid.uuid4().hex url = self._get_project_endpoint_group_url( endpoint_group_id, project_id) self.put(url, expected_status=http_client.NOT_FOUND) def test_get_endpoint_group_in_project(self): """Test retrieving project endpoint group association.""" # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # associate endpoint group with project url = self._get_project_endpoint_group_url( endpoint_group_id, self.project_id) self.put(url) response = self.get(url) self.assertEqual( endpoint_group_id, response.result['project_endpoint_group']['endpoint_group_id']) self.assertEqual( self.project_id, response.result['project_endpoint_group']['project_id']) def test_get_invalid_endpoint_group_in_project(self): """Test retrieving project endpoint group association.""" endpoint_group_id = uuid.uuid4().hex project_id = uuid.uuid4().hex url = self._get_project_endpoint_group_url( endpoint_group_id, project_id) self.get(url, expected_status=http_client.NOT_FOUND) def test_list_endpoint_groups_in_project(self): """GET 
/OS-EP-FILTER/projects/{project_id}/endpoint_groups.""" # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # associate endpoint group with project url = self._get_project_endpoint_group_url( endpoint_group_id, self.project_id) self.put(url) url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' % {'project_id': self.project_id}) response = self.get(url) self.assertEqual( endpoint_group_id, response.result['endpoint_groups'][0]['id']) def test_list_endpoint_groups_in_invalid_project(self): """Test retrieving from invalid project.""" project_id = uuid.uuid4().hex url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' % {'project_id': project_id}) self.get(url, expected_status=http_client.NOT_FOUND) def test_empty_endpoint_groups_in_project(self): """Test when no endpoint groups associated with the project.""" url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' % {'project_id': self.project_id}) response = self.get(url) self.assertEqual(0, len(response.result['endpoint_groups'])) def test_check_endpoint_group_to_project(self): """Test HEAD with a valid endpoint group and project association.""" endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) self._create_endpoint_group_project_association(endpoint_group_id, self.project_id) url = self._get_project_endpoint_group_url( endpoint_group_id, self.project_id) self.head(url, expected_status=http_client.OK) def test_check_endpoint_group_to_project_with_invalid_project_id(self): """Test HEAD with an invalid endpoint group and project association.""" # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # create an endpoint group to project association url = self._get_project_endpoint_group_url( endpoint_group_id, 
self.project_id) self.put(url) # send a head request with an invalid project id project_id = uuid.uuid4().hex url = self._get_project_endpoint_group_url( endpoint_group_id, project_id) self.head(url, expected_status=http_client.NOT_FOUND) def test_list_endpoint_groups(self): """GET /OS-EP-FILTER/endpoint_groups.""" # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # recover all endpoint groups url = '/OS-EP-FILTER/endpoint_groups' r = self.get(url) self.assertNotEmpty(r.result['endpoint_groups']) self.assertEqual(endpoint_group_id, r.result['endpoint_groups'][0].get('id')) def test_list_projects_associated_with_endpoint_group(self): """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects Valid endpoint group test case. """ # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # associate endpoint group with project self._create_endpoint_group_project_association(endpoint_group_id, self.project_id) # recover list of projects associated with endpoint group url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' '/projects' % {'endpoint_group_id': endpoint_group_id}) self.get(url) def test_list_endpoints_associated_with_endpoint_group(self): """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/endpoints Valid endpoint group test case. 
""" # create a service service_ref = unit.new_service_ref() response = self.post( '/services', body={'service': service_ref}) service_id = response.result['service']['id'] # create an endpoint endpoint_ref = unit.new_endpoint_ref(service_id=service_id, interface='public', region_id=self.region_id) response = self.post('/endpoints', body={'endpoint': endpoint_ref}) endpoint_id = response.result['endpoint']['id'] # create an endpoint group body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY) body['endpoint_group']['filters'] = {'service_id': service_id} endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, body) # create association self._create_endpoint_group_project_association(endpoint_group_id, self.project_id) # recover list of endpoints associated with endpoint group url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' '/endpoints' % {'endpoint_group_id': endpoint_group_id}) r = self.get(url) self.assertNotEmpty(r.result['endpoints']) self.assertEqual(endpoint_id, r.result['endpoints'][0].get('id')) def test_list_endpoints_associated_with_project_endpoint_group(self): """GET /OS-EP-FILTER/projects/{project_id}/endpoints Valid project, endpoint id, and endpoint group test case. 
""" # create a temporary service service_ref = unit.new_service_ref() response = self.post('/services', body={'service': service_ref}) service_id2 = response.result['service']['id'] # create additional endpoints self._create_endpoint_and_associations( self.default_domain_project_id, service_id2) self._create_endpoint_and_associations( self.default_domain_project_id) # create project and endpoint association with default endpoint: self.put(self.default_request_url) # create an endpoint group that contains a different endpoint body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY) body['endpoint_group']['filters'] = {'service_id': service_id2} endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, body) # associate endpoint group with project self._create_endpoint_group_project_association( endpoint_group_id, self.default_domain_project_id) # Now get a list of the filtered endpoints endpoints_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % { 'project_id': self.default_domain_project_id} r = self.get(endpoints_url) endpoints = self.assertValidEndpointListResponse(r) self.assertEqual(2, len(endpoints)) # Ensure catalog includes the endpoints from endpoint_group project # association, this is needed when a project scoped token is issued # and "endpoint_filter.sql" backend driver is in place. 
user_id = uuid.uuid4().hex catalog_list = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) self.assertEqual(2, len(catalog_list)) # Now remove project endpoint group association url = self._get_project_endpoint_group_url( endpoint_group_id, self.default_domain_project_id) self.delete(url) # Now remove endpoint group url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % { 'endpoint_group_id': endpoint_group_id} self.delete(url) r = self.get(endpoints_url) endpoints = self.assertValidEndpointListResponse(r) self.assertEqual(1, len(endpoints)) catalog_list = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) self.assertEqual(1, len(catalog_list)) def test_endpoint_group_project_cleanup_with_project(self): # create endpoint group endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # create new project and associate with endpoint_group project_ref = unit.new_project_ref(domain_id=self.domain_id) r = self.post('/projects', body={'project': project_ref}) project = self.assertValidProjectResponse(r, project_ref) url = self._get_project_endpoint_group_url(endpoint_group_id, project['id']) self.put(url) # check that we can recover the project endpoint group association self.get(url) # Now delete the project and then try and retrieve the project # endpoint group association again self.delete('/projects/%(project_id)s' % { 'project_id': project['id']}) self.get(url, expected_status=http_client.NOT_FOUND) def test_endpoint_group_project_cleanup_with_endpoint_group(self): # create endpoint group endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # create new project and associate with endpoint_group project_ref = unit.new_project_ref(domain_id=self.domain_id) r = self.post('/projects', body={'project': project_ref}) project = self.assertValidProjectResponse(r, project_ref) url = 
self._get_project_endpoint_group_url(endpoint_group_id, project['id']) self.put(url) # check that we can recover the project endpoint group association self.get(url) # now remove the project endpoint group association self.delete(url) self.get(url, expected_status=http_client.NOT_FOUND) def test_removing_an_endpoint_group_project(self): # create an endpoint group endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # create an endpoint_group project url = self._get_project_endpoint_group_url( endpoint_group_id, self.default_domain_project_id) self.put(url) # remove the endpoint group project self.delete(url) self.get(url, expected_status=http_client.NOT_FOUND) def test_remove_endpoint_group_with_project_association(self): # create an endpoint group endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # create an endpoint_group project project_endpoint_group_url = self._get_project_endpoint_group_url( endpoint_group_id, self.default_domain_project_id) self.put(project_endpoint_group_url) # remove endpoint group, the associated endpoint_group project will # be removed as well. endpoint_group_url = ('/OS-EP-FILTER/endpoint_groups/' '%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id}) self.delete(endpoint_group_url) self.get(endpoint_group_url, expected_status=http_client.NOT_FOUND) self.get(project_endpoint_group_url, expected_status=http_client.NOT_FOUND) @unit.skip_if_cache_disabled('catalog') def test_add_endpoint_group_to_project_invalidates_catalog_cache(self): # create another endpoint with 'admin' interface which matches # 'filters' definition in endpoint group, then there should be two # endpoints returned when retrieving v3 catalog if cache works as # expected. # this should be done at first since `create_endpoint` will also # invalidate cache. 
endpoint_id2 = uuid.uuid4().hex endpoint2 = unit.new_endpoint_ref(service_id=self.service_id, region_id=self.region_id, interface='admin', id=endpoint_id2) self.catalog_api.create_endpoint(endpoint_id2, endpoint2) # create a project and endpoint association. self.put(self.default_request_url) # there is only one endpoint associated with the default project. user_id = uuid.uuid4().hex catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1)) # create an endpoint group. endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # add the endpoint group to default project, bypassing # catalog_api API manager. self.catalog_api.driver.add_endpoint_group_to_project( endpoint_group_id, self.default_domain_project_id) # can get back only one endpoint from the cache, since the catalog # is pulled out from cache. invalid_catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) self.assertThat(invalid_catalog[0]['endpoints'], matchers.HasLength(1)) self.assertEqual(catalog, invalid_catalog) # remove the endpoint group from default project, and add it again via # catalog_api API manager. self.catalog_api.driver.remove_endpoint_group_from_project( endpoint_group_id, self.default_domain_project_id) # add the endpoint group to default project. self.catalog_api.add_endpoint_group_to_project( endpoint_group_id, self.default_domain_project_id) catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) # now, it will return 2 endpoints since the cache has been # invalidated. 
self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2)) ep_id_list = [catalog[0]['endpoints'][0]['id'], catalog[0]['endpoints'][1]['id']] self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list) @unit.skip_if_cache_disabled('catalog') def test_remove_endpoint_group_from_project_invalidates_cache(self): # create another endpoint with 'admin' interface which matches # 'filters' definition in endpoint group, then there should be two # endpoints returned when retrieving v3 catalog. But only one # endpoint will return after the endpoint group's deletion if cache # works as expected. # this should be done at first since `create_endpoint` will also # invalidate cache. endpoint_id2 = uuid.uuid4().hex endpoint2 = unit.new_endpoint_ref(service_id=self.service_id, region_id=self.region_id, interface='admin', id=endpoint_id2) self.catalog_api.create_endpoint(endpoint_id2, endpoint2) # create project and endpoint association. self.put(self.default_request_url) # create an endpoint group. endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY) # add the endpoint group to default project. self.catalog_api.add_endpoint_group_to_project( endpoint_group_id, self.default_domain_project_id) # should get back two endpoints, one from endpoint project # association, the other one is from endpoint_group project # association. user_id = uuid.uuid4().hex catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2)) ep_id_list = [catalog[0]['endpoints'][0]['id'], catalog[0]['endpoints'][1]['id']] self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list) # remove endpoint_group project association, bypassing # catalog_api API manager. 
self.catalog_api.driver.remove_endpoint_group_from_project( endpoint_group_id, self.default_domain_project_id) # still get back two endpoints, since the catalog is pulled out # from cache and the cache haven't been invalidated. invalid_catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) self.assertThat(invalid_catalog[0]['endpoints'], matchers.HasLength(2)) self.assertEqual(catalog, invalid_catalog) # add back the endpoint_group project association and remove it from # manager. self.catalog_api.driver.add_endpoint_group_to_project( endpoint_group_id, self.default_domain_project_id) self.catalog_api.remove_endpoint_group_from_project( endpoint_group_id, self.default_domain_project_id) # should only get back one endpoint since the cache has been # invalidated after the endpoint_group project association was # removed. catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id) self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1)) self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id']) def _create_valid_endpoint_group(self, url, body): r = self.post(url, body=body) return r.result['endpoint_group']['id'] def _create_endpoint_group_project_association(self, endpoint_group_id, project_id): url = self._get_project_endpoint_group_url(endpoint_group_id, project_id) self.put(url) def _get_project_endpoint_group_url(self, endpoint_group_id, project_id): return ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' '/projects/%(project_id)s' % {'endpoint_group_id': endpoint_group_id, 'project_id': project_id}) def _create_endpoint_and_associations(self, project_id, service_id=None): """Creates an endpoint associated with service and project.""" if not service_id: # create a new service service_ref = unit.new_service_ref() response = self.post( '/services', body={'service': service_ref}) service_id = response.result['service']['id'] # create endpoint endpoint_ref = 
unit.new_endpoint_ref(service_id=service_id, interface='public', region_id=self.region_id) response = self.post('/endpoints', body={'endpoint': endpoint_ref}) endpoint = response.result['endpoint'] # now add endpoint to project self.put('/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.project['id'], 'endpoint_id': endpoint['id']}) return endpoint keystone-9.0.0/keystone/tests/unit/test_backend_federation_sql.py0000664000567000056710000000367312701407102026566 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import sql from keystone.tests.unit import test_backend_sql class SqlFederation(test_backend_sql.SqlModels): """Set of tests for checking SQL Federation.""" def test_identity_provider(self): cols = (('id', sql.String, 64), ('enabled', sql.Boolean, None), ('description', sql.Text, None)) self.assertExpectedSchema('identity_provider', cols) def test_idp_remote_ids(self): cols = (('idp_id', sql.String, 64), ('remote_id', sql.String, 255)) self.assertExpectedSchema('idp_remote_ids', cols) def test_federated_protocol(self): cols = (('id', sql.String, 64), ('idp_id', sql.String, 64), ('mapping_id', sql.String, 64)) self.assertExpectedSchema('federation_protocol', cols) def test_mapping(self): cols = (('id', sql.String, 64), ('rules', sql.JsonBlob, None)) self.assertExpectedSchema('mapping', cols) def test_service_provider(self): cols = (('auth_url', sql.String, 256), ('id', sql.String, 64), ('enabled', sql.Boolean, None), ('description', sql.Text, None), ('relay_state_prefix', sql.String, 256), ('sp_url', sql.String, 256)) self.assertExpectedSchema('service_provider', cols) keystone-9.0.0/keystone/tests/unit/test_backend_kvs.py0000664000567000056710000001233412701407102024364 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import uuid from oslo_utils import timeutils import six from keystone.common import utils from keystone import exception from keystone.tests import unit from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.token import test_backends as token_tests class KvsToken(unit.TestCase, token_tests.TokenTests): def setUp(self): super(KvsToken, self).setUp() self.load_backends() def test_flush_expired_token(self): self.assertRaises( exception.NotImplemented, self.token_provider_api._persistence.flush_expired_tokens) def _update_user_token_index_direct(self, user_key, token_id, new_data): persistence = self.token_provider_api._persistence token_list = persistence.driver._get_user_token_list_with_expiry( user_key) # Update the user-index so that the expires time is _actually_ expired # since we do not do an explicit get on the token, we only reference # the data in the user index (to save extra round-trips to the kvs # backend). for i, data in enumerate(token_list): if data[0] == token_id: token_list[i] = new_data break self.token_provider_api._persistence.driver._store.set(user_key, token_list) def test_cleanup_user_index_on_create(self): user_id = six.text_type(uuid.uuid4().hex) valid_token_id, data = self.create_token_sample_data(user_id=user_id) expired_token_id, expired_data = self.create_token_sample_data( user_id=user_id) expire_delta = datetime.timedelta(seconds=86400) # NOTE(morganfainberg): Directly access the data cache since we need to # get expired tokens as well as valid tokens. 
token_persistence = self.token_provider_api._persistence user_key = token_persistence.driver._prefix_user_id(user_id) user_token_list = token_persistence.driver._store.get(user_key) valid_token_ref = token_persistence.get_token(valid_token_id) expired_token_ref = token_persistence.get_token(expired_token_id) expected_user_token_list = [ (valid_token_id, utils.isotime(valid_token_ref['expires'], subsecond=True)), (expired_token_id, utils.isotime(expired_token_ref['expires'], subsecond=True))] self.assertEqual(expected_user_token_list, user_token_list) new_expired_data = (expired_token_id, utils.isotime( (timeutils.utcnow() - expire_delta), subsecond=True)) self._update_user_token_index_direct(user_key, expired_token_id, new_expired_data) valid_token_id_2, valid_data_2 = self.create_token_sample_data( user_id=user_id) valid_token_ref_2 = token_persistence.get_token(valid_token_id_2) expected_user_token_list = [ (valid_token_id, utils.isotime(valid_token_ref['expires'], subsecond=True)), (valid_token_id_2, utils.isotime(valid_token_ref_2['expires'], subsecond=True))] user_token_list = token_persistence.driver._store.get(user_key) self.assertEqual(expected_user_token_list, user_token_list) # Test that revoked tokens are removed from the list on create. 
token_persistence.delete_token(valid_token_id_2) new_token_id, data = self.create_token_sample_data(user_id=user_id) new_token_ref = token_persistence.get_token(new_token_id) expected_user_token_list = [ (valid_token_id, utils.isotime(valid_token_ref['expires'], subsecond=True)), (new_token_id, utils.isotime(new_token_ref['expires'], subsecond=True))] user_token_list = token_persistence.driver._store.get(user_key) self.assertEqual(expected_user_token_list, user_token_list) class KvsTokenCacheInvalidation(unit.TestCase, token_tests.TokenCacheInvalidation): def setUp(self): super(KvsTokenCacheInvalidation, self).setUp() self.useFixture(database.Database(self.sql_driver_version_overrides)) self.load_backends() self._create_test_data() def config_overrides(self): super(KvsTokenCacheInvalidation, self).config_overrides() self.config_fixture.config(group='token', driver='kvs') keystone-9.0.0/keystone/tests/unit/trust/0000775000567000056710000000000012701407246021670 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/trust/__init__.py0000664000567000056710000000000012701407102023756 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/trust/test_backends.py0000664000567000056710000001662312701407102025052 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import uuid from oslo_utils import timeutils from six.moves import range from keystone import exception class TrustTests(object): def create_sample_trust(self, new_id, remaining_uses=None): self.trustor = self.user_foo self.trustee = self.user_two expires_at = datetime.datetime.utcnow().replace(year=2032) trust_data = (self.trust_api.create_trust (new_id, {'trustor_user_id': self.trustor['id'], 'trustee_user_id': self.user_two['id'], 'project_id': self.tenant_bar['id'], 'expires_at': expires_at, 'impersonation': True, 'remaining_uses': remaining_uses}, roles=[{"id": "member"}, {"id": "other"}, {"id": "browser"}])) return trust_data def test_delete_trust(self): new_id = uuid.uuid4().hex trust_data = self.create_sample_trust(new_id) trust_id = trust_data['id'] self.assertIsNotNone(trust_data) trust_data = self.trust_api.get_trust(trust_id) self.assertEqual(new_id, trust_data['id']) self.trust_api.delete_trust(trust_id) self.assertRaises(exception.TrustNotFound, self.trust_api.get_trust, trust_id) def test_delete_trust_not_found(self): trust_id = uuid.uuid4().hex self.assertRaises(exception.TrustNotFound, self.trust_api.delete_trust, trust_id) def test_get_trust(self): new_id = uuid.uuid4().hex trust_data = self.create_sample_trust(new_id) trust_id = trust_data['id'] self.assertIsNotNone(trust_data) trust_data = self.trust_api.get_trust(trust_id) self.assertEqual(new_id, trust_data['id']) self.trust_api.delete_trust(trust_data['id']) def test_get_deleted_trust(self): new_id = uuid.uuid4().hex trust_data = self.create_sample_trust(new_id) self.assertIsNotNone(trust_data) self.assertIsNone(trust_data['deleted_at']) self.trust_api.delete_trust(new_id) self.assertRaises(exception.TrustNotFound, self.trust_api.get_trust, new_id) deleted_trust = self.trust_api.get_trust(trust_data['id'], deleted=True) self.assertEqual(trust_data['id'], deleted_trust['id']) self.assertIsNotNone(deleted_trust.get('deleted_at')) def test_create_trust(self): new_id = 
uuid.uuid4().hex trust_data = self.create_sample_trust(new_id) self.assertEqual(new_id, trust_data['id']) self.assertEqual(self.trustee['id'], trust_data['trustee_user_id']) self.assertEqual(self.trustor['id'], trust_data['trustor_user_id']) self.assertTrue(timeutils.normalize_time(trust_data['expires_at']) > timeutils.utcnow()) self.assertEqual([{'id': 'member'}, {'id': 'other'}, {'id': 'browser'}], trust_data['roles']) def test_list_trust_by_trustee(self): for i in range(3): self.create_sample_trust(uuid.uuid4().hex) trusts = self.trust_api.list_trusts_for_trustee(self.trustee['id']) self.assertEqual(3, len(trusts)) self.assertEqual(trusts[0]["trustee_user_id"], self.trustee['id']) trusts = self.trust_api.list_trusts_for_trustee(self.trustor['id']) self.assertEqual(0, len(trusts)) def test_list_trust_by_trustor(self): for i in range(3): self.create_sample_trust(uuid.uuid4().hex) trusts = self.trust_api.list_trusts_for_trustor(self.trustor['id']) self.assertEqual(3, len(trusts)) self.assertEqual(trusts[0]["trustor_user_id"], self.trustor['id']) trusts = self.trust_api.list_trusts_for_trustor(self.trustee['id']) self.assertEqual(0, len(trusts)) def test_list_trusts(self): for i in range(3): self.create_sample_trust(uuid.uuid4().hex) trusts = self.trust_api.list_trusts() self.assertEqual(3, len(trusts)) def test_trust_has_remaining_uses_positive(self): # create a trust with limited uses, check that we have uses left trust_data = self.create_sample_trust(uuid.uuid4().hex, remaining_uses=5) self.assertEqual(5, trust_data['remaining_uses']) # create a trust with unlimited uses, check that we have uses left trust_data = self.create_sample_trust(uuid.uuid4().hex) self.assertIsNone(trust_data['remaining_uses']) def test_trust_has_remaining_uses_negative(self): # try to create a trust with no remaining uses, check that it fails self.assertRaises(exception.ValidationError, self.create_sample_trust, uuid.uuid4().hex, remaining_uses=0) # try to create a trust with negative 
remaining uses, # check that it fails self.assertRaises(exception.ValidationError, self.create_sample_trust, uuid.uuid4().hex, remaining_uses=-12) def test_consume_use(self): # consume a trust repeatedly until it has no uses anymore trust_data = self.create_sample_trust(uuid.uuid4().hex, remaining_uses=2) self.trust_api.consume_use(trust_data['id']) t = self.trust_api.get_trust(trust_data['id']) self.assertEqual(1, t['remaining_uses']) self.trust_api.consume_use(trust_data['id']) # This was the last use, the trust isn't available anymore self.assertRaises(exception.TrustNotFound, self.trust_api.get_trust, trust_data['id']) def test_duplicate_trusts_not_allowed(self): self.trustor = self.user_foo self.trustee = self.user_two trust_data = {'trustor_user_id': self.trustor['id'], 'trustee_user_id': self.user_two['id'], 'project_id': self.tenant_bar['id'], 'expires_at': timeutils.parse_isotime( '2032-02-18T18:10:00Z'), 'impersonation': True, 'remaining_uses': None} roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] self.trust_api.create_trust(uuid.uuid4().hex, trust_data, roles) self.assertRaises(exception.Conflict, self.trust_api.create_trust, uuid.uuid4().hex, trust_data, roles) keystone-9.0.0/keystone/tests/unit/contrib/0000775000567000056710000000000012701407246022147 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/contrib/__init__.py0000664000567000056710000000000012701407102024235 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/contrib/federation/0000775000567000056710000000000012701407246024267 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/contrib/federation/__init__.py0000664000567000056710000000000012701407102026355 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/contrib/federation/test_utils.py0000664000567000056710000007560112701407105027043 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use 
this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_serialization import jsonutils from keystone.auth.plugins import mapped from keystone import exception from keystone.federation import utils as mapping_utils from keystone.tests import unit from keystone.tests.unit import mapping_fixtures FAKE_MAPPING_ID = uuid.uuid4().hex class MappingRuleEngineTests(unit.BaseTestCase): """A class for testing the mapping rule engine.""" def assertValidMappedUserObject(self, mapped_properties, user_type='ephemeral', domain_id=None): """Check whether mapped properties object has 'user' within. According to today's rules, RuleProcessor does not have to issue user's id or name. What's actually required is user's type and for ephemeral users that would be service domain named 'Federated'. """ self.assertIn('user', mapped_properties, message='Missing user object in mapped properties') user = mapped_properties['user'] self.assertIn('type', user) self.assertEqual(user_type, user['type']) self.assertIn('domain', user) domain = user['domain'] domain_name_or_id = domain.get('id') or domain.get('name') domain_ref = domain_id or 'Federated' self.assertEqual(domain_ref, domain_name_or_id) def test_rule_engine_any_one_of_and_direct_mapping(self): """Should return user's name and group id EMPLOYEE_GROUP_ID. The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE. They will test the case where `any_one_of` is valid, and there is a direct mapping for the users name. 
""" mapping = mapping_fixtures.MAPPING_LARGE assertion = mapping_fixtures.ADMIN_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) fn = assertion.get('FirstName') ln = assertion.get('LastName') full_name = '%s %s' % (fn, ln) group_ids = values.get('group_ids') user_name = values.get('user', {}).get('name') self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids) self.assertEqual(full_name, user_name) def test_rule_engine_no_regex_match(self): """Should deny authorization, the email of the tester won't match. This will not match since the email in the assertion will fail the regex test. It is set to match any @example.com address. But the incoming value is set to eviltester@example.org. RuleProcessor should raise ValidationError. """ mapping = mapping_fixtures.MAPPING_LARGE assertion = mapping_fixtures.BAD_TESTER_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) self.assertRaises(exception.ValidationError, rp.process, assertion) def test_rule_engine_regex_many_groups(self): """Should return group CONTRACTOR_GROUP_ID. The TESTER_ASSERTION should successfully have a match in MAPPING_TESTER_REGEX. This will test the case where many groups are in the assertion, and a regex value is used to try and find a match. """ mapping = mapping_fixtures.MAPPING_TESTER_REGEX assertion = mapping_fixtures.TESTER_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids) def test_rule_engine_any_one_of_many_rules(self): """Should return group CONTRACTOR_GROUP_ID. The CONTRACTOR_ASSERTION should successfully have a match in MAPPING_SMALL. 
This will test the case where many rules must be matched, including an `any_one_of`, and a direct mapping. """ mapping = mapping_fixtures.MAPPING_SMALL assertion = mapping_fixtures.CONTRACTOR_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids) def test_rule_engine_not_any_of_and_direct_mapping(self): """Should return user's name and email. The CUSTOMER_ASSERTION should successfully have a match in MAPPING_LARGE. This will test the case where a requirement has `not_any_of`, and direct mapping to a username, no group. """ mapping = mapping_fixtures.MAPPING_LARGE assertion = mapping_fixtures.CUSTOMER_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertEqual([], group_ids,) def test_rule_engine_not_any_of_many_rules(self): """Should return group EMPLOYEE_GROUP_ID. The EMPLOYEE_ASSERTION should successfully have a match in MAPPING_SMALL. This will test the case where many remote rules must be matched, including a `not_any_of`. 
""" mapping = mapping_fixtures.MAPPING_SMALL assertion = mapping_fixtures.EMPLOYEE_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids) def test_rule_engine_not_any_of_regex_verify_pass(self): """Should return group DEVELOPER_GROUP_ID. The DEVELOPER_ASSERTION should successfully have a match in MAPPING_DEVELOPER_REGEX. This will test the case where many remote rules must be matched, including a `not_any_of`, with regex set to True. """ mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX assertion = mapping_fixtures.DEVELOPER_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids) def test_rule_engine_not_any_of_regex_verify_fail(self): """Should deny authorization. The email in the assertion will fail the regex test. It is set to reject any @example.org address, but the incoming value is set to evildeveloper@example.org. RuleProcessor should yield ValidationError. """ mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX assertion = mapping_fixtures.BAD_DEVELOPER_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) self.assertRaises(exception.ValidationError, rp.process, assertion) def _rule_engine_regex_match_and_many_groups(self, assertion): """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID. A helper function injecting assertion passed as an argument. Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results. 
""" mapping = mapping_fixtures.MAPPING_LARGE rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertValidMappedUserObject(values) self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids) self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids) def test_rule_engine_regex_match_and_many_groups(self): """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID. The TESTER_ASSERTION should successfully have a match in MAPPING_LARGE. This will test a successful regex match for an `any_one_of` evaluation type, and will have many groups returned. """ self._rule_engine_regex_match_and_many_groups( mapping_fixtures.TESTER_ASSERTION) def test_rule_engine_discards_nonstring_objects(self): """Check whether RuleProcessor discards non string objects. Despite the fact that assertion is malformed and contains non string objects, RuleProcessor should correctly discard them and successfully have a match in MAPPING_LARGE. """ self._rule_engine_regex_match_and_many_groups( mapping_fixtures.MALFORMED_TESTER_ASSERTION) def test_rule_engine_fails_after_discarding_nonstring(self): """Check whether RuleProcessor discards non string objects. Expect RuleProcessor to discard non string object, which is required for a correct rule match. RuleProcessor will result with ValidationError. """ mapping = mapping_fixtures.MAPPING_SMALL rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION self.assertRaises(exception.ValidationError, rp.process, assertion) def test_using_remote_direct_mapping_that_doesnt_exist_fails(self): """Test for the correct error when referring to a bad remote match. The remote match must exist in a rule when a local section refers to a remote matching using the format (e.g. 
{0} in a local section). """ mapping = mapping_fixtures.MAPPING_DIRECT_MAPPING_THROUGH_KEYWORD rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.CUSTOMER_ASSERTION self.assertRaises(exception.DirectMappingError, rp.process, assertion) def test_rule_engine_returns_group_names(self): """Check whether RuleProcessor returns group names with their domains. RuleProcessor should return 'group_names' entry with a list of dictionaries with two entries 'name' and 'domain' identifying group by its name and domain. """ mapping = mapping_fixtures.MAPPING_GROUP_NAMES rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.EMPLOYEE_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) reference = { mapping_fixtures.DEVELOPER_GROUP_NAME: { "name": mapping_fixtures.DEVELOPER_GROUP_NAME, "domain": { "name": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_NAME } }, mapping_fixtures.TESTER_GROUP_NAME: { "name": mapping_fixtures.TESTER_GROUP_NAME, "domain": { "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID } } } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) def test_rule_engine_whitelist_and_direct_groups_mapping(self): """Should return user's groups Developer and Contractor. The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match in MAPPING_GROUPS_WHITELIST. It will test the case where 'whitelist' correctly filters out Manager and only allows Developer and Contractor. 
""" mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) reference = { mapping_fixtures.DEVELOPER_GROUP_NAME: { "name": mapping_fixtures.DEVELOPER_GROUP_NAME, "domain": { "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID } }, mapping_fixtures.CONTRACTOR_GROUP_NAME: { "name": mapping_fixtures.CONTRACTOR_GROUP_NAME, "domain": { "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID } } } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) self.assertEqual('tbo', mapped_properties['user']['name']) self.assertEqual([], mapped_properties['group_ids']) def test_rule_engine_blacklist_and_direct_groups_mapping(self): """Should return user's group Developer. The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match in MAPPING_GROUPS_BLACKLIST. It will test the case where 'blacklist' correctly filters out Manager and Developer and only allows Contractor. """ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) reference = { mapping_fixtures.CONTRACTOR_GROUP_NAME: { "name": mapping_fixtures.CONTRACTOR_GROUP_NAME, "domain": { "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID } } } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) self.assertEqual('tbo', mapped_properties['user']['name']) self.assertEqual([], mapped_properties['group_ids']) def test_rule_engine_blacklist_and_direct_groups_mapping_multiples(self): """Tests matching multiple values before the blacklist. 
Verifies that the local indexes are correct when matching multiple remote values for a field when the field occurs before the blacklist entry in the remote rules. """ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MULTIPLES assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) reference = { mapping_fixtures.CONTRACTOR_GROUP_NAME: { "name": mapping_fixtures.CONTRACTOR_GROUP_NAME, "domain": { "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID } } } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) self.assertEqual('tbo', mapped_properties['user']['name']) self.assertEqual([], mapped_properties['group_ids']) def test_rule_engine_whitelist_direct_group_mapping_missing_domain(self): """Test if the local rule is rejected upon missing domain value This is a variation with a ``whitelist`` filter. """ mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) self.assertRaises(exception.ValidationError, rp.process, assertion) def test_rule_engine_blacklist_direct_group_mapping_missing_domain(self): """Test if the local rule is rejected upon missing domain value This is a variation with a ``blacklist`` filter. """ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) self.assertRaises(exception.ValidationError, rp.process, assertion) def test_rule_engine_no_groups_allowed(self): """Should return user mapped to no groups. 
The EMPLOYEE_ASSERTION should successfully have a match in MAPPING_GROUPS_WHITELIST, but 'whitelist' should filter out the group values from the assertion and thus map to no groups. """ mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST assertion = mapping_fixtures.EMPLOYEE_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertListEqual(mapped_properties['group_names'], []) self.assertListEqual(mapped_properties['group_ids'], []) self.assertEqual('tbo', mapped_properties['user']['name']) def test_mapping_federated_domain_specified(self): """Test mapping engine when domain 'ephemeral' is explicitly set. For that, we use mapping rule MAPPING_EPHEMERAL_USER and assertion EMPLOYEE_ASSERTION """ mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.EMPLOYEE_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) def test_set_ephemeral_domain_to_ephemeral_users(self): """Test auto assigning service domain to ephemeral users. Test that ephemeral users will always become members of federated service domain. The check depends on ``type`` value which must be set to ``ephemeral`` in case of ephemeral user. 
""" mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) def test_local_user_local_domain(self): """Test that local users can have non-service domains assigned.""" mapping = mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject( mapped_properties, user_type='local', domain_id=mapping_fixtures.LOCAL_DOMAIN) def test_user_identifications_name(self): """Test varius mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. Test plan: - Check if the user has proper domain ('federated') set - Check if the user has property type set ('ephemeral') - Check if user's name is properly mapped from the assertion - Check if unique_id is properly set and equal to display_name, as it was not explicitly specified in the mapping. """ mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) self.assertEqual('jsmith', mapped_properties['user']['name']) unique_id, display_name = mapped.get_user_unique_id_and_display_name( {}, mapped_properties) self.assertEqual('jsmith', unique_id) self.assertEqual('jsmith', display_name) def test_user_identifications_name_and_federated_domain(self): """Test varius mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. 
Test plan: - Check if the user has proper domain ('federated') set - Check if the user has propert type set ('ephemeral') - Check if user's name is properly mapped from the assertion - Check if the unique_id and display_name are properly set """ mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.EMPLOYEE_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) unique_id, display_name = mapped.get_user_unique_id_and_display_name( {}, mapped_properties) self.assertEqual('tbo', display_name) self.assertEqual('abc123%40example.com', unique_id) def test_user_identification_id(self): """Test varius mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. Test plan: - Check if the user has proper domain ('federated') set - Check if the user has propert type set ('ephemeral') - Check if user's display_name is properly set and equal to unique_id, as it was not explicitly specified in the mapping. """ mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.ADMIN_ASSERTION mapped_properties = rp.process(assertion) context = {'environment': {}} self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) unique_id, display_name = mapped.get_user_unique_id_and_display_name( context, mapped_properties) self.assertEqual('bob', unique_id) self.assertEqual('bob', display_name) def test_user_identification_id_and_name(self): """Test varius mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. 
Test plan: - Check if the user has proper domain ('federated') set - Check if the user has proper type set ('ephemeral') - Check if display_name is properly set from the assertion - Check if unique_id is properly set and and equal to value hardcoded in the mapping This test does two iterations with different assertions used as input for the Mapping Engine. Different assertions will be matched with different rules in the ruleset, effectively issuing different user_id (hardcoded values). In the first iteration, the hardcoded user_id is not url-safe and we expect Keystone to make it url safe. In the latter iteration, provided user_id is already url-safe and we expect server not to change it. """ testcases = [(mapping_fixtures.CUSTOMER_ASSERTION, 'bwilliams'), (mapping_fixtures.EMPLOYEE_ASSERTION, 'tbo')] for assertion, exp_user_name in testcases: mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) context = {'environment': {}} self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) unique_id, display_name = ( mapped.get_user_unique_id_and_display_name(context, mapped_properties) ) self.assertEqual(exp_user_name, display_name) self.assertEqual('abc123%40example.com', unique_id) def test_whitelist_pass_through(self): mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_PASS_THROUGH rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.DEVELOPER_ASSERTION mapped_properties = rp.process(assertion) self.assertValidMappedUserObject(mapped_properties) self.assertEqual('developacct', mapped_properties['user']['name']) self.assertEqual('Developer', mapped_properties['group_names'][0]['name']) def test_mapping_with_incorrect_local_keys(self): mapping = mapping_fixtures.MAPPING_BAD_LOCAL_SETUP self.assertRaises(exception.ValidationError, mapping_utils.validate_mapping_structure, mapping) def 
test_type_not_in_assertion(self): """Test that if the remote "type" is not in the assertion it fails.""" mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_PASS_THROUGH rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = {uuid.uuid4().hex: uuid.uuid4().hex} self.assertRaises(exception.ValidationError, rp.process, assertion) def test_rule_engine_group_ids_mapping_whitelist(self): """Test mapping engine when group_ids is explicitly set Also test whitelists on group ids """ mapping = mapping_fixtures.MAPPING_GROUPS_IDS_WHITELIST assertion = mapping_fixtures.GROUP_IDS_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertEqual('opilotte', mapped_properties['user']['name']) self.assertListEqual([], mapped_properties['group_names']) self.assertItemsEqual(['abc123', 'ghi789', 'klm012'], mapped_properties['group_ids']) def test_rule_engine_group_ids_mapping_blacklist(self): """Test mapping engine when group_ids is explicitly set. Also test blacklists on group ids """ mapping = mapping_fixtures.MAPPING_GROUPS_IDS_BLACKLIST assertion = mapping_fixtures.GROUP_IDS_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertEqual('opilotte', mapped_properties['user']['name']) self.assertListEqual([], mapped_properties['group_names']) self.assertItemsEqual(['abc123', 'ghi789', 'klm012'], mapped_properties['group_ids']) def test_rule_engine_group_ids_mapping_only_one_group(self): """Test mapping engine when group_ids is explicitly set. 
If the group ids list has only one group, test if the transformation is done correctly """ mapping = mapping_fixtures.MAPPING_GROUPS_IDS_WHITELIST assertion = mapping_fixtures.GROUP_IDS_ASSERTION_ONLY_ONE_GROUP rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertEqual('opilotte', mapped_properties['user']['name']) self.assertListEqual([], mapped_properties['group_names']) self.assertItemsEqual(['210mlk', '321cba'], mapped_properties['group_ids']) class TestUnicodeAssertionData(unit.BaseTestCase): """Ensure that unicode data in the assertion headers works. Bug #1525250 reported that something was not getting correctly encoded and/or decoded when assertion data contained non-ASCII characters. This test class mimics what happens in a real HTTP request. """ def setUp(self): super(TestUnicodeAssertionData, self).setUp() self.config_fixture = self.useFixture(config_fixture.Config(cfg.CONF)) self.config_fixture.config(group='federation', assertion_prefix='PFX') def _pull_mapping_rules_from_the_database(self): # NOTE(dstanek): In a live system. The rules are dumped into JSON bytes # before being # stored in the database. Upon retrieval the bytes are # loaded and the resulting dictionary is full of unicode text strings. # Most of tests in this file incorrectly assume the mapping fixture # dictionary is the same as what it would look like coming out of the # database. The string, when coming out of the database, are all text. return jsonutils.loads(jsonutils.dumps( mapping_fixtures.MAPPING_UNICODE)) def _pull_assertion_from_the_request_headers(self): # NOTE(dstanek): In a live system the bytes for the assertion are # pulled from the HTTP headers. These bytes may be decodable as # ISO-8859-1 according to Section 3.2.4 of RFC 7230. Let's assume # that our web server plugins are correctly encoding the data. 
context = dict(environment=mapping_fixtures.UNICODE_NAME_ASSERTION) data = mapping_utils.get_assertion_params_from_env(context) # NOTE(dstanek): keystone.auth.plugins.mapped return dict(data) def test_unicode(self): mapping = self._pull_mapping_rules_from_the_database() assertion = self._pull_assertion_from_the_request_headers() rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) fn = assertion.get('PFX_FirstName') ln = assertion.get('PFX_LastName') full_name = '%s %s' % (fn, ln) user_name = values.get('user', {}).get('name') self.assertEqual(full_name, user_name) keystone-9.0.0/keystone/tests/unit/config_files/0000775000567000056710000000000012701407246023136 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/config_files/backend_mysql.conf0000664000567000056710000000026512701407105026616 0ustar jenkinsjenkins00000000000000#Used for running the Migrate tests against a live MySQL Server #See _sql_livetest.py [database] connection = mysql+pymysql://keystone:keystone@localhost/keystone_test?charset=utf8 keystone-9.0.0/keystone/tests/unit/config_files/backend_sql.conf0000664000567000056710000000050112701407102026236 0ustar jenkinsjenkins00000000000000[database] #For a specific location file based SQLite use: #connection = sqlite:////tmp/keystone.db #To Test MySQL: #connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8 #To Test PostgreSQL: #connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8 idle_timeout = 200 keystone-9.0.0/keystone/tests/unit/config_files/backend_db2.conf0000664000567000056710000000024112701407105026112 0ustar jenkinsjenkins00000000000000#Used for running the Migrate tests against a live DB2 Server #See _sql_livetest.py [database] connection = ibm_db_sa://keystone:keystone@/staktest?charset=utf8 keystone-9.0.0/keystone/tests/unit/config_files/backend_ldap.conf0000664000567000056710000000013212701407102026357 0ustar 
jenkinsjenkins00000000000000[ldap] url = fake://memory user = cn=Admin password = password suffix = cn=example,cn=com keystone-9.0.0/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/0000775000567000056710000000000012701407246032341 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000keystone-9.0.0/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.confkeystone-9.0.0/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain10000664000567000056710000000017212701407102035443 0ustar jenkinsjenkins00000000000000# The domain-specific configuration file for the test domain # 'domain1' for use with unit tests. [identity] driver = sqlkeystone-9.0.0/keystone/tests/unit/config_files/backend_pool_liveldap.conf0000664000567000056710000000156312701407102030301 0ustar jenkinsjenkins00000000000000[ldap] url = ldap://localhost user = cn=Manager,dc=openstack,dc=org password = test suffix = dc=openstack,dc=org group_tree_dn = ou=UserGroups,dc=openstack,dc=org user_tree_dn = ou=Users,dc=openstack,dc=org user_enabled_emulation = True user_mail_attribute = mail use_dumb_member = True # Connection pooling specific attributes # Enable LDAP connection pooling. (boolean value) use_pool=true # Connection pool size. (integer value) pool_size=5 # Connection lifetime in seconds. # (integer value) pool_connection_lifetime=60 # Enable LDAP connection pooling for end user authentication. # If use_pool is disabled, then this setting is meaningless # and is not used at all. (boolean value) use_auth_pool=true # End user auth connection pool size. (integer value) auth_pool_size=50 # End user auth connection lifetime in seconds. 
(integer # value) auth_pool_connection_lifetime=300keystone-9.0.0/keystone/tests/unit/config_files/domain_configs_multi_ldap/0000775000567000056710000000000012701407246030327 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf0000664000567000056710000000035112701407102034354 0ustar jenkinsjenkins00000000000000# The domain-specific configuration file for the test domain # 'domain1' for use with unit tests. [ldap] url = fake://memory1 user = cn=Admin password = password suffix = cn=example,cn=com [identity] driver = ldap list_limit = 101 keystone-9.0.0/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf0000664000567000056710000000045512701407102034362 0ustar jenkinsjenkins00000000000000# The domain-specific configuration file for the test domain # 'domain2' for use with unit tests. [ldap] url = fake://memory user = cn=Admin password = password suffix = cn=myroot,cn=com group_tree_dn = ou=UserGroups,dc=myroot,dc=org user_tree_dn = ou=Users,dc=myroot,dc=org [identity] driver = ldapkeystone-9.0.0/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf0000664000567000056710000000050612701407102034412 0ustar jenkinsjenkins00000000000000# The domain-specific configuration file for the default domain for # use with unit tests. # # The domain_name of the default domain is 'Default', hence the # strange mix of upper/lower case in the file name. 
[ldap] url = fake://memory user = cn=Admin password = password suffix = cn=example,cn=com [identity] driver = ldap keystone-9.0.0/keystone/tests/unit/config_files/backend_liveldap.conf0000664000567000056710000000043712701407102027247 0ustar jenkinsjenkins00000000000000[ldap] url = ldap://localhost user = cn=Manager,dc=openstack,dc=org password = test suffix = dc=openstack,dc=org group_tree_dn = ou=UserGroups,dc=openstack,dc=org user_tree_dn = ou=Users,dc=openstack,dc=org user_enabled_emulation = True user_mail_attribute = mail use_dumb_member = True keystone-9.0.0/keystone/tests/unit/config_files/deprecated_override.conf0000664000567000056710000000047612701407102030002 0ustar jenkinsjenkins00000000000000# Options in this file are deprecated. See test_config. [sql] # These options were deprecated in Icehouse with the switch to oslo's # db.sqlalchemy. connection = sqlite://deprecated idle_timeout = 54321 [database] # These are the new options from the [sql] section. connection = sqlite://new idle_timeout = 65432 keystone-9.0.0/keystone/tests/unit/config_files/backend_tls_liveldap.conf0000664000567000056710000000063012701407102030124 0ustar jenkinsjenkins00000000000000[ldap] url = ldap:// user = dc=Manager,dc=openstack,dc=org password = test suffix = dc=openstack,dc=org group_tree_dn = ou=UserGroups,dc=openstack,dc=org user_tree_dn = ou=Users,dc=openstack,dc=org user_enabled_emulation = True user_mail_attribute = mail use_dumb_member = True use_tls = True tls_cacertfile = /etc/keystone/ssl/certs/cacert.pem tls_cacertdir = /etc/keystone/ssl/certs/ tls_req_cert = demand keystone-9.0.0/keystone/tests/unit/config_files/domain_configs_one_extra_sql/0000775000567000056710000000000012701407246031040 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf0000664000567000056710000000017212701407102035067 0ustar jenkinsjenkins00000000000000# The domain-specific configuration file for the test domain # 
'domain2' for use with unit tests. [identity] driver = sqlkeystone-9.0.0/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf0000664000567000056710000000050512701407102030454 0ustar jenkinsjenkins00000000000000[database] connection = sqlite:// #For a file based sqlite use #connection = sqlite:////tmp/keystone.db #To Test MySQL: #connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8 #To Test PostgreSQL: #connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8 idle_timeout = 200 keystone-9.0.0/keystone/tests/unit/config_files/backend_ldap_pool.conf0000664000567000056710000000207612701407102027421 0ustar jenkinsjenkins00000000000000[ldap] url = fakepool://memory user = cn=Admin password = password backend_entities = ['Tenant', 'User', 'UserRoleAssociation', 'Role', 'Group', 'Domain'] suffix = cn=example,cn=com # Connection pooling specific attributes # Enable LDAP connection pooling. (boolean value) use_pool=true # Connection pool size. (integer value) pool_size=5 # Maximum count of reconnect trials. (integer value) pool_retry_max=2 # Time span in seconds to wait between two reconnect trials. # (floating point value) pool_retry_delay=0.2 # Connector timeout in seconds. Value -1 indicates indefinite # wait for response. (integer value) pool_connection_timeout=-1 # Connection lifetime in seconds. # (integer value) pool_connection_lifetime=600 # Enable LDAP connection pooling for end user authentication. # If use_pool is disabled, then this setting is meaningless # and is not used at all. (boolean value) use_auth_pool=true # End user auth connection pool size. (integer value) auth_pool_size=50 # End user auth connection lifetime in seconds. (integer # value) auth_pool_connection_lifetime=60keystone-9.0.0/keystone/tests/unit/config_files/deprecated.conf0000664000567000056710000000031512701407102026073 0ustar jenkinsjenkins00000000000000# Options in this file are deprecated. See test_config. 
[sql] # These options were deprecated in Icehouse with the switch to oslo's # db.sqlalchemy. connection = sqlite://deprecated idle_timeout = 54321 keystone-9.0.0/keystone/tests/unit/config_files/backend_postgresql.conf0000664000567000056710000000027712701407105027657 0ustar jenkinsjenkins00000000000000#Used for running the Migrate tests against a live Postgresql Server #See _sql_livetest.py [database] connection = postgresql://keystone:keystone@localhost/keystone_test?client_encoding=utf8 keystone-9.0.0/keystone/tests/unit/config_files/backend_ldap_sql.conf0000664000567000056710000000063412701407102027245 0ustar jenkinsjenkins00000000000000[database] #For a specific location file based SQLite use: #connection = sqlite:////tmp/keystone.db #To Test MySQL: #connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8 #To Test PostgreSQL: #connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8 idle_timeout = 200 [ldap] url = fake://memory user = cn=Admin password = password suffix = cn=example,cn=com keystone-9.0.0/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/0000775000567000056710000000000012701407246031476 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000keystone-9.0.0/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.confkeystone-9.0.0/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.con0000664000567000056710000000017212701407102035356 0ustar jenkinsjenkins00000000000000# The domain-specific configuration file for the test domain # 'domain1' for use with unit tests. 
[identity] driver = sql././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000keystone-9.0.0/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.confkeystone-9.0.0/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.con0000664000567000056710000000050512701407102035412 0ustar jenkinsjenkins00000000000000# The domain-specific configuration file for the default domain for # use with unit tests. # # The domain_name of the default domain is 'Default', hence the # strange mix of upper/lower case in the file name. [ldap] url = fake://memory user = cn=Admin password = password suffix = cn=example,cn=com [identity] driver = ldapkeystone-9.0.0/keystone/tests/unit/config_files/test_auth_plugin.conf0000664000567000056710000000025712701407102027356 0ustar jenkinsjenkins00000000000000[auth] methods = external,password,token,simple_challenge_response,saml2,openid,x509 simple_challenge_response = keystone.tests.unit.test_auth_plugin.SimpleChallengeResponse keystone-9.0.0/keystone/tests/unit/test_entry_points.py0000664000567000056710000000312212701407102024642 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import stevedore from testtools import matchers from keystone.tests.unit import core as test class TestPasteDeploymentEntryPoints(test.TestCase): def test_entry_point_middleware(self): """Assert that our list of expected middleware is present.""" expected_names = [ 'admin_token_auth', 'build_auth_context', 'crud_extension', 'cors', 'debug', 'endpoint_filter_extension', 'ec2_extension', 'ec2_extension_v3', 'federation_extension', 'json_body', 'oauth1_extension', 'request_id', 'revoke_extension', 's3_extension', 'simple_cert_extension', 'sizelimit', 'token_auth', 'url_normalize', 'user_crud_extension', ] em = stevedore.ExtensionManager('paste.filter_factory') actual_names = [extension.name for extension in em] self.assertThat(actual_names, matchers.ContainsAll(expected_names)) keystone-9.0.0/keystone/tests/unit/test_no_admin_token_auth.py0000664000567000056710000000433612701407102026122 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from six.moves import http_client import webtest from keystone.tests import unit class TestNoAdminTokenAuth(unit.TestCase): def setUp(self): super(TestNoAdminTokenAuth, self).setUp() self.load_backends() self._generate_paste_config() self.admin_app = webtest.TestApp( self.loadapp(unit.dirs.tmp('no_admin_token_auth'), name='admin'), extra_environ=dict(REMOTE_ADDR='127.0.0.1')) self.addCleanup(setattr, self, 'admin_app', None) def _generate_paste_config(self): # Generate a file, based on keystone-paste.ini, that doesn't include # admin_token_auth in the pipeline with open(unit.dirs.etc('keystone-paste.ini'), 'r') as f: contents = f.read() new_contents = contents.replace(' admin_token_auth ', ' ') filename = unit.dirs.tmp('no_admin_token_auth-paste.ini') with open(filename, 'w') as f: f.write(new_contents) self.addCleanup(os.remove, filename) def test_request_no_admin_token_auth(self): # This test verifies that if the admin_token_auth middleware isn't # in the paste pipeline that users can still make requests. # Note(blk-u): Picked /v2.0/tenants because it's an operation that # requires is_admin in the context, any operation that requires # is_admin would work for this test. REQ_PATH = '/v2.0/tenants' # If the following does not raise, then the test is successful. self.admin_app.get(REQ_PATH, headers={'X-Auth-Token': 'NotAdminToken'}, status=http_client.UNAUTHORIZED) keystone-9.0.0/keystone/tests/unit/test_v2.py0000664000567000056710000015676012701407102022455 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import uuid from keystoneclient.common import cms from oslo_config import cfg import six from six.moves import http_client from testtools import matchers from keystone.common import extension as keystone_extension from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit import rest from keystone.tests.unit.schema import v2 CONF = cfg.CONF class CoreApiTests(object): def assertValidError(self, error): self.assertIsNotNone(error.get('code')) self.assertIsNotNone(error.get('title')) self.assertIsNotNone(error.get('message')) def assertValidVersion(self, version): self.assertIsNotNone(version) self.assertIsNotNone(version.get('id')) self.assertIsNotNone(version.get('status')) self.assertIsNotNone(version.get('updated')) def assertValidExtension(self, extension): self.assertIsNotNone(extension) self.assertIsNotNone(extension.get('name')) self.assertIsNotNone(extension.get('namespace')) self.assertIsNotNone(extension.get('alias')) self.assertIsNotNone(extension.get('updated')) def assertValidExtensionLink(self, link): self.assertIsNotNone(link.get('rel')) self.assertIsNotNone(link.get('type')) self.assertIsNotNone(link.get('href')) def assertValidTenant(self, tenant): self.assertIsNotNone(tenant.get('id')) self.assertIsNotNone(tenant.get('name')) self.assertNotIn('domain_id', tenant) self.assertNotIn('parent_id', tenant) def assertValidUser(self, user): self.assertIsNotNone(user.get('id')) self.assertIsNotNone(user.get('name')) def assertValidRole(self, tenant): self.assertIsNotNone(tenant.get('id')) 
self.assertIsNotNone(tenant.get('name')) def test_public_not_found(self): r = self.public_request( path='/%s' % uuid.uuid4().hex, expected_status=http_client.NOT_FOUND) self.assertValidErrorResponse(r) def test_admin_not_found(self): r = self.admin_request( path='/%s' % uuid.uuid4().hex, expected_status=http_client.NOT_FOUND) self.assertValidErrorResponse(r) def test_public_multiple_choice(self): r = self.public_request(path='/', expected_status=300) self.assertValidMultipleChoiceResponse(r) def test_admin_multiple_choice(self): r = self.admin_request(path='/', expected_status=300) self.assertValidMultipleChoiceResponse(r) def test_public_version(self): r = self.public_request(path='/v2.0/') self.assertValidVersionResponse(r) def test_admin_version(self): r = self.admin_request(path='/v2.0/') self.assertValidVersionResponse(r) def test_public_extensions(self): r = self.public_request(path='/v2.0/extensions') self.assertValidExtensionListResponse( r, keystone_extension.PUBLIC_EXTENSIONS) def test_admin_extensions(self): r = self.admin_request(path='/v2.0/extensions') self.assertValidExtensionListResponse( r, keystone_extension.ADMIN_EXTENSIONS) def test_admin_extensions_returns_not_found(self): self.admin_request(path='/v2.0/extensions/invalid-extension', expected_status=http_client.NOT_FOUND) def test_public_osksadm_extension_returns_not_found(self): self.public_request(path='/v2.0/extensions/OS-KSADM', expected_status=http_client.NOT_FOUND) def test_admin_osksadm_extension(self): r = self.admin_request(path='/v2.0/extensions/OS-KSADM') self.assertValidExtensionResponse( r, keystone_extension.ADMIN_EXTENSIONS) def test_authenticate(self): r = self.public_request( method='POST', path='/v2.0/tokens', body={ 'auth': { 'passwordCredentials': { 'username': self.user_foo['name'], 'password': self.user_foo['password'], }, 'tenantId': self.tenant_bar['id'], }, }, expected_status=http_client.OK) self.assertValidAuthenticationResponse(r, require_service_catalog=True) def 
test_authenticate_unscoped(self): r = self.public_request( method='POST', path='/v2.0/tokens', body={ 'auth': { 'passwordCredentials': { 'username': self.user_foo['name'], 'password': self.user_foo['password'], }, }, }, expected_status=http_client.OK) self.assertValidAuthenticationResponse(r) def test_get_tenants_for_token(self): r = self.public_request(path='/v2.0/tenants', token=self.get_scoped_token()) self.assertValidTenantListResponse(r) def test_validate_token(self): token = self.get_scoped_token() r = self.admin_request( path='/v2.0/tokens/%(token_id)s' % { 'token_id': token, }, token=token) self.assertValidAuthenticationResponse(r) def test_invalid_token_returns_not_found(self): token = self.get_scoped_token() self.admin_request( path='/v2.0/tokens/%(token_id)s' % { 'token_id': 'invalid', }, token=token, expected_status=http_client.NOT_FOUND) def test_validate_token_service_role(self): self.md_foobar = self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_service['id'], self.role_service['id']) token = self.get_scoped_token(tenant_id='service') r = self.admin_request( path='/v2.0/tokens/%s' % token, token=token) self.assertValidAuthenticationResponse(r) def test_remove_role_revokes_token(self): self.md_foobar = self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_service['id'], self.role_service['id']) token = self.get_scoped_token(tenant_id='service') r = self.admin_request( path='/v2.0/tokens/%s' % token, token=token) self.assertValidAuthenticationResponse(r) self.assignment_api.remove_role_from_user_and_project( self.user_foo['id'], self.tenant_service['id'], self.role_service['id']) r = self.admin_request( path='/v2.0/tokens/%s' % token, token=token, expected_status=http_client.UNAUTHORIZED) def test_validate_token_belongs_to(self): token = self.get_scoped_token() path = ('/v2.0/tokens/%s?belongsTo=%s' % (token, self.tenant_bar['id'])) r = self.admin_request(path=path, token=token) 
self.assertValidAuthenticationResponse(r, require_service_catalog=True) def test_validate_token_no_belongs_to_still_returns_catalog(self): token = self.get_scoped_token() path = ('/v2.0/tokens/%s' % token) r = self.admin_request(path=path, token=token) self.assertValidAuthenticationResponse(r, require_service_catalog=True) def test_validate_token_head(self): """The same call as above, except using HEAD. There's no response to validate here, but this is included for the sake of completely covering the core API. """ token = self.get_scoped_token() self.admin_request( method='HEAD', path='/v2.0/tokens/%(token_id)s' % { 'token_id': token, }, token=token, expected_status=http_client.OK) def test_endpoints(self): token = self.get_scoped_token() r = self.admin_request( path='/v2.0/tokens/%(token_id)s/endpoints' % { 'token_id': token, }, token=token) self.assertValidEndpointListResponse(r) def test_get_tenant(self): token = self.get_scoped_token() r = self.admin_request( path='/v2.0/tenants/%(tenant_id)s' % { 'tenant_id': self.tenant_bar['id'], }, token=token) self.assertValidTenantResponse(r) def test_get_tenant_by_name(self): token = self.get_scoped_token() r = self.admin_request( path='/v2.0/tenants?name=%(tenant_name)s' % { 'tenant_name': self.tenant_bar['name'], }, token=token) self.assertValidTenantResponse(r) def test_get_user_roles_with_tenant(self): token = self.get_scoped_token() r = self.admin_request( path='/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % { 'tenant_id': self.tenant_bar['id'], 'user_id': self.user_foo['id'], }, token=token) self.assertValidRoleListResponse(r) def test_get_user_roles_without_tenant(self): token = self.get_scoped_token() self.admin_request( path='/v2.0/users/%(user_id)s/roles' % { 'user_id': self.user_foo['id'], }, token=token, expected_status=http_client.NOT_IMPLEMENTED) def test_get_user(self): token = self.get_scoped_token() r = self.admin_request( path='/v2.0/users/%(user_id)s' % { 'user_id': self.user_foo['id'], }, 
token=token) self.assertValidUserResponse(r) def test_get_user_by_name(self): token = self.get_scoped_token() r = self.admin_request( path='/v2.0/users?name=%(user_name)s' % { 'user_name': self.user_foo['name'], }, token=token) self.assertValidUserResponse(r) def test_create_update_user_invalid_enabled_type(self): # Enforce usage of boolean for 'enabled' field token = self.get_scoped_token() # Test CREATE request r = self.admin_request( method='POST', path='/v2.0/users', body={ 'user': { 'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex, 'enabled': "False", }, }, token=token, expected_status=http_client.BAD_REQUEST) self.assertValidErrorResponse(r) r = self.admin_request( method='POST', path='/v2.0/users', body={ 'user': { 'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex, # In JSON, 0|1 are not booleans 'enabled': 0, }, }, token=token, expected_status=http_client.BAD_REQUEST) self.assertValidErrorResponse(r) # Test UPDATE request path = '/v2.0/users/%(user_id)s' % { 'user_id': self.user_foo['id'], } r = self.admin_request( method='PUT', path=path, body={ 'user': { 'enabled': "False", }, }, token=token, expected_status=http_client.BAD_REQUEST) self.assertValidErrorResponse(r) r = self.admin_request( method='PUT', path=path, body={ 'user': { # In JSON, 0|1 are not booleans 'enabled': 1, }, }, token=token, expected_status=http_client.BAD_REQUEST) self.assertValidErrorResponse(r) def test_create_update_user_valid_enabled_type(self): # Enforce usage of boolean for 'enabled' field token = self.get_scoped_token() # Test CREATE request self.admin_request(method='POST', path='/v2.0/users', body={ 'user': { 'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex, 'enabled': False, }, }, token=token, expected_status=http_client.OK) def test_error_response(self): """This triggers assertValidErrorResponse by convention.""" self.public_request(path='/v2.0/tenants', expected_status=http_client.UNAUTHORIZED) def test_invalid_parameter_error_response(self): token = 
self.get_scoped_token() bad_body = { 'OS-KSADM:service%s' % uuid.uuid4().hex: { 'name': uuid.uuid4().hex, 'type': uuid.uuid4().hex, }, } res = self.admin_request(method='POST', path='/v2.0/OS-KSADM/services', body=bad_body, token=token, expected_status=http_client.BAD_REQUEST) self.assertValidErrorResponse(res) res = self.admin_request(method='POST', path='/v2.0/users', body=bad_body, token=token, expected_status=http_client.BAD_REQUEST) self.assertValidErrorResponse(res) def _get_user_id(self, r): """Helper method to return user ID from a response. This needs to be overridden by child classes based on their content type. """ raise NotImplementedError() def _get_role_id(self, r): """Helper method to return a role ID from a response. This needs to be overridden by child classes based on their content type. """ raise NotImplementedError() def _get_role_name(self, r): """Helper method to return role NAME from a response. This needs to be overridden by child classes based on their content type. """ raise NotImplementedError() def _get_project_id(self, r): """Helper method to return project ID from a response. This needs to be overridden by child classes based on their content type. """ raise NotImplementedError() def assertNoRoles(self, r): """Helper method to assert No Roles This needs to be overridden by child classes based on their content type. 
""" raise NotImplementedError() def test_update_user_tenant(self): token = self.get_scoped_token() # Create a new user r = self.admin_request( method='POST', path='/v2.0/users', body={ 'user': { 'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex, 'tenantId': self.tenant_bar['id'], 'enabled': True, }, }, token=token, expected_status=http_client.OK) user_id = self._get_user_id(r.result) # Check if member_role is in tenant_bar r = self.admin_request( path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': self.tenant_bar['id'], 'user_id': user_id }, token=token, expected_status=http_client.OK) self.assertEqual(CONF.member_role_name, self._get_role_name(r.result)) # Create a new tenant r = self.admin_request( method='POST', path='/v2.0/tenants', body={ 'tenant': { 'name': 'test_update_user', 'description': 'A description ...', 'enabled': True, }, }, token=token, expected_status=http_client.OK) project_id = self._get_project_id(r.result) # Update user's tenant r = self.admin_request( method='PUT', path='/v2.0/users/%(user_id)s' % { 'user_id': user_id, }, body={ 'user': { 'tenantId': project_id, }, }, token=token, expected_status=http_client.OK) # 'member_role' should be in new_tenant r = self.admin_request( path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': project_id, 'user_id': user_id }, token=token, expected_status=http_client.OK) self.assertEqual('_member_', self._get_role_name(r.result)) # 'member_role' should not be in tenant_bar any more r = self.admin_request( path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': self.tenant_bar['id'], 'user_id': user_id }, token=token, expected_status=http_client.OK) self.assertNoRoles(r.result) def test_update_user_with_invalid_tenant(self): token = self.get_scoped_token() # Create a new user r = self.admin_request( method='POST', path='/v2.0/users', body={ 'user': { 'name': 'test_invalid_tenant', 'password': uuid.uuid4().hex, 'tenantId': 
self.tenant_bar['id'], 'enabled': True, }, }, token=token, expected_status=http_client.OK) user_id = self._get_user_id(r.result) # Update user with an invalid tenant r = self.admin_request( method='PUT', path='/v2.0/users/%(user_id)s' % { 'user_id': user_id, }, body={ 'user': { 'tenantId': 'abcde12345heha', }, }, token=token, expected_status=http_client.NOT_FOUND) def test_update_user_with_invalid_tenant_no_prev_tenant(self): token = self.get_scoped_token() # Create a new user r = self.admin_request( method='POST', path='/v2.0/users', body={ 'user': { 'name': 'test_invalid_tenant', 'password': uuid.uuid4().hex, 'enabled': True, }, }, token=token, expected_status=http_client.OK) user_id = self._get_user_id(r.result) # Update user with an invalid tenant r = self.admin_request( method='PUT', path='/v2.0/users/%(user_id)s' % { 'user_id': user_id, }, body={ 'user': { 'tenantId': 'abcde12345heha', }, }, token=token, expected_status=http_client.NOT_FOUND) def test_update_user_with_old_tenant(self): token = self.get_scoped_token() # Create a new user r = self.admin_request( method='POST', path='/v2.0/users', body={ 'user': { 'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex, 'tenantId': self.tenant_bar['id'], 'enabled': True, }, }, token=token, expected_status=http_client.OK) user_id = self._get_user_id(r.result) # Check if member_role is in tenant_bar r = self.admin_request( path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': self.tenant_bar['id'], 'user_id': user_id }, token=token, expected_status=http_client.OK) self.assertEqual(CONF.member_role_name, self._get_role_name(r.result)) # Update user's tenant with old tenant id r = self.admin_request( method='PUT', path='/v2.0/users/%(user_id)s' % { 'user_id': user_id, }, body={ 'user': { 'tenantId': self.tenant_bar['id'], }, }, token=token, expected_status=http_client.OK) # 'member_role' should still be in tenant_bar r = self.admin_request( 
path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': self.tenant_bar['id'], 'user_id': user_id }, token=token, expected_status=http_client.OK) self.assertEqual('_member_', self._get_role_name(r.result)) def test_authenticating_a_user_with_no_password(self): token = self.get_scoped_token() username = uuid.uuid4().hex # create the user self.admin_request( method='POST', path='/v2.0/users', body={ 'user': { 'name': username, 'enabled': True, }, }, token=token) # fail to authenticate r = self.public_request( method='POST', path='/v2.0/tokens', body={ 'auth': { 'passwordCredentials': { 'username': username, 'password': 'password', }, }, }, expected_status=http_client.UNAUTHORIZED) self.assertValidErrorResponse(r) def test_www_authenticate_header(self): r = self.public_request( path='/v2.0/tenants', expected_status=http_client.UNAUTHORIZED) self.assertEqual('Keystone uri="http://localhost"', r.headers.get('WWW-Authenticate')) def test_www_authenticate_header_host(self): test_url = 'http://%s:4187' % uuid.uuid4().hex self.config_fixture.config(public_endpoint=test_url) r = self.public_request( path='/v2.0/tenants', expected_status=http_client.UNAUTHORIZED) self.assertEqual('Keystone uri="%s"' % test_url, r.headers.get('WWW-Authenticate')) class LegacyV2UsernameTests(object): """Tests to show the broken username behavior in V2. The V2 API is documented to use `username` instead of `name`. The API forced used to use name and left the username to fall into the `extra` field. These tests ensure this behavior works so fixes to `username`/`name` will be backward compatible. """ def create_user(self, **user_attrs): """Creates a users and returns the response object. 
:param user_attrs: attributes added to the request body (optional) """ token = self.get_scoped_token() body = { 'user': { 'name': uuid.uuid4().hex, 'enabled': True, }, } body['user'].update(user_attrs) return self.admin_request( method='POST', path='/v2.0/users', token=token, body=body, expected_status=http_client.OK) def test_create_with_extra_username(self): """The response for creating a user will contain the extra fields.""" fake_username = uuid.uuid4().hex r = self.create_user(username=fake_username) self.assertValidUserResponse(r) user = self.get_user_from_response(r) self.assertEqual(fake_username, user.get('username')) def test_get_returns_username_from_extra(self): """The response for getting a user will contain the extra fields.""" token = self.get_scoped_token() fake_username = uuid.uuid4().hex r = self.create_user(username=fake_username) id_ = self.get_user_attribute_from_response(r, 'id') r = self.admin_request(path='/v2.0/users/%s' % id_, token=token) self.assertValidUserResponse(r) user = self.get_user_from_response(r) self.assertEqual(fake_username, user.get('username')) def test_update_returns_new_username_when_adding_username(self): """The response for updating a user will contain the extra fields. This is specifically testing for updating a username when a value was not previously set. 
""" token = self.get_scoped_token() r = self.create_user() id_ = self.get_user_attribute_from_response(r, 'id') name = self.get_user_attribute_from_response(r, 'name') enabled = self.get_user_attribute_from_response(r, 'enabled') r = self.admin_request( method='PUT', path='/v2.0/users/%s' % id_, token=token, body={ 'user': { 'name': name, 'username': 'new_username', 'enabled': enabled, }, }, expected_status=http_client.OK) self.assertValidUserResponse(r) user = self.get_user_from_response(r) self.assertEqual('new_username', user.get('username')) def test_update_returns_new_username_when_updating_username(self): """The response for updating a user will contain the extra fields. This tests updating a username that was previously set. """ token = self.get_scoped_token() r = self.create_user(username='original_username') id_ = self.get_user_attribute_from_response(r, 'id') name = self.get_user_attribute_from_response(r, 'name') enabled = self.get_user_attribute_from_response(r, 'enabled') r = self.admin_request( method='PUT', path='/v2.0/users/%s' % id_, token=token, body={ 'user': { 'name': name, 'username': 'new_username', 'enabled': enabled, }, }, expected_status=http_client.OK) self.assertValidUserResponse(r) user = self.get_user_from_response(r) self.assertEqual('new_username', user.get('username')) def test_username_is_always_returned_create(self): """Username is set as the value of name if no username is provided. This matches the v2.0 spec where we really should be using username and not name. """ r = self.create_user() self.assertValidUserResponse(r) user = self.get_user_from_response(r) self.assertEqual(user.get('name'), user.get('username')) def test_username_is_always_returned_get(self): """Username is set as the value of name if no username is provided. This matches the v2.0 spec where we really should be using username and not name. 
""" token = self.get_scoped_token() r = self.create_user() id_ = self.get_user_attribute_from_response(r, 'id') r = self.admin_request(path='/v2.0/users/%s' % id_, token=token) self.assertValidUserResponse(r) user = self.get_user_from_response(r) self.assertEqual(user.get('name'), user.get('username')) def test_username_is_always_returned_get_by_name(self): """Username is set as the value of name if no username is provided. This matches the v2.0 spec where we really should be using username and not name. """ token = self.get_scoped_token() r = self.create_user() name = self.get_user_attribute_from_response(r, 'name') r = self.admin_request(path='/v2.0/users?name=%s' % name, token=token) self.assertValidUserResponse(r) user = self.get_user_from_response(r) self.assertEqual(user.get('name'), user.get('username')) def test_username_is_always_returned_update_no_username_provided(self): """Username is set as the value of name if no username is provided. This matches the v2.0 spec where we really should be using username and not name. """ token = self.get_scoped_token() r = self.create_user() id_ = self.get_user_attribute_from_response(r, 'id') name = self.get_user_attribute_from_response(r, 'name') enabled = self.get_user_attribute_from_response(r, 'enabled') r = self.admin_request( method='PUT', path='/v2.0/users/%s' % id_, token=token, body={ 'user': { 'name': name, 'enabled': enabled, }, }, expected_status=http_client.OK) self.assertValidUserResponse(r) user = self.get_user_from_response(r) self.assertEqual(user.get('name'), user.get('username')) def test_updated_username_is_returned(self): """Username is set as the value of name if no username is provided. This matches the v2.0 spec where we really should be using username and not name. 
""" token = self.get_scoped_token() r = self.create_user() id_ = self.get_user_attribute_from_response(r, 'id') name = self.get_user_attribute_from_response(r, 'name') enabled = self.get_user_attribute_from_response(r, 'enabled') r = self.admin_request( method='PUT', path='/v2.0/users/%s' % id_, token=token, body={ 'user': { 'name': name, 'enabled': enabled, }, }, expected_status=http_client.OK) self.assertValidUserResponse(r) user = self.get_user_from_response(r) self.assertEqual(user.get('name'), user.get('username')) def test_username_can_be_used_instead_of_name_create(self): token = self.get_scoped_token() r = self.admin_request( method='POST', path='/v2.0/users', token=token, body={ 'user': { 'username': uuid.uuid4().hex, 'enabled': True, }, }, expected_status=http_client.OK) self.assertValidUserResponse(r) user = self.get_user_from_response(r) self.assertEqual(user.get('name'), user.get('username')) def test_username_can_be_used_instead_of_name_update(self): token = self.get_scoped_token() r = self.create_user() id_ = self.get_user_attribute_from_response(r, 'id') new_username = uuid.uuid4().hex enabled = self.get_user_attribute_from_response(r, 'enabled') r = self.admin_request( method='PUT', path='/v2.0/users/%s' % id_, token=token, body={ 'user': { 'username': new_username, 'enabled': enabled, }, }, expected_status=http_client.OK) self.assertValidUserResponse(r) user = self.get_user_from_response(r) self.assertEqual(new_username, user.get('name')) self.assertEqual(user.get('name'), user.get('username')) class RestfulTestCase(rest.RestfulTestCase): def setUp(self): super(RestfulTestCase, self).setUp() # TODO(termie): add an admin user to the fixtures and use that user # override the fixtures, for now self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_bar['id'], self.role_admin['id']) class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests): def config_overrides(self): super(V2TestCase, self).config_overrides() 
self.config_fixture.config( group='catalog', driver='templated', template_file=unit.dirs.tests('default_catalog.templates')) def _get_user_id(self, r): return r['user']['id'] def _get_role_name(self, r): return r['roles'][0]['name'] def _get_role_id(self, r): return r['roles'][0]['id'] def _get_project_id(self, r): return r['tenant']['id'] def _get_token_id(self, r): return r.result['access']['token']['id'] def assertNoRoles(self, r): self.assertEqual([], r['roles']) def assertValidErrorResponse(self, r): self.assertIsNotNone(r.result.get('error')) self.assertValidError(r.result['error']) self.assertEqual(r.result['error']['code'], r.status_code) def assertValidExtension(self, extension, expected): super(V2TestCase, self).assertValidExtension(extension) descriptions = [ext['description'] for ext in six.itervalues(expected)] description = extension.get('description') self.assertIsNotNone(description) self.assertIn(description, descriptions) self.assertIsNotNone(extension.get('links')) self.assertNotEmpty(extension.get('links')) for link in extension.get('links'): self.assertValidExtensionLink(link) def assertValidExtensionListResponse(self, r, expected): self.assertIsNotNone(r.result.get('extensions')) self.assertIsNotNone(r.result['extensions'].get('values')) self.assertNotEmpty(r.result['extensions'].get('values')) for extension in r.result['extensions']['values']: self.assertValidExtension(extension, expected) def assertValidExtensionResponse(self, r, expected): self.assertValidExtension(r.result.get('extension'), expected) def assertValidUser(self, user): super(V2TestCase, self).assertValidUser(user) self.assertNotIn('default_project_id', user) if 'tenantId' in user: # NOTE(morganfainberg): tenantId should never be "None", it gets # filtered out of the object if it is there. This is suspenders # and a belt check to avoid unintended regressions. 
self.assertIsNotNone(user.get('tenantId')) def assertValidAuthenticationResponse(self, r, require_service_catalog=False): self.assertIsNotNone(r.result.get('access')) self.assertIsNotNone(r.result['access'].get('token')) self.assertIsNotNone(r.result['access'].get('user')) # validate token self.assertIsNotNone(r.result['access']['token'].get('id')) self.assertIsNotNone(r.result['access']['token'].get('expires')) tenant = r.result['access']['token'].get('tenant') if tenant is not None: # validate tenant self.assertIsNotNone(tenant.get('id')) self.assertIsNotNone(tenant.get('name')) # validate user self.assertIsNotNone(r.result['access']['user'].get('id')) self.assertIsNotNone(r.result['access']['user'].get('name')) if require_service_catalog: # roles are only provided with a service catalog roles = r.result['access']['user'].get('roles') self.assertNotEmpty(roles) for role in roles: self.assertIsNotNone(role.get('name')) serviceCatalog = r.result['access'].get('serviceCatalog') # validate service catalog if require_service_catalog: self.assertIsNotNone(serviceCatalog) if serviceCatalog is not None: self.assertIsInstance(serviceCatalog, list) if require_service_catalog: self.assertNotEmpty(serviceCatalog) for service in r.result['access']['serviceCatalog']: # validate service self.assertIsNotNone(service.get('name')) self.assertIsNotNone(service.get('type')) # services contain at least one endpoint self.assertIsNotNone(service.get('endpoints')) self.assertNotEmpty(service['endpoints']) for endpoint in service['endpoints']: # validate service endpoint self.assertIsNotNone(endpoint.get('publicURL')) def assertValidTenantListResponse(self, r): self.assertIsNotNone(r.result.get('tenants')) self.assertNotEmpty(r.result['tenants']) for tenant in r.result['tenants']: self.assertValidTenant(tenant) self.assertIsNotNone(tenant.get('enabled')) self.assertIn(tenant.get('enabled'), [True, False]) def assertValidUserResponse(self, r): self.assertIsNotNone(r.result.get('user')) 
self.assertValidUser(r.result['user']) def assertValidTenantResponse(self, r): self.assertIsNotNone(r.result.get('tenant')) self.assertValidTenant(r.result['tenant']) def assertValidRoleListResponse(self, r): self.assertIsNotNone(r.result.get('roles')) self.assertNotEmpty(r.result['roles']) for role in r.result['roles']: self.assertValidRole(role) def assertValidVersion(self, version): super(V2TestCase, self).assertValidVersion(version) self.assertIsNotNone(version.get('links')) self.assertNotEmpty(version.get('links')) for link in version.get('links'): self.assertIsNotNone(link.get('rel')) self.assertIsNotNone(link.get('href')) self.assertIsNotNone(version.get('media-types')) self.assertNotEmpty(version.get('media-types')) for media in version.get('media-types'): self.assertIsNotNone(media.get('base')) self.assertIsNotNone(media.get('type')) def assertValidMultipleChoiceResponse(self, r): self.assertIsNotNone(r.result.get('versions')) self.assertIsNotNone(r.result['versions'].get('values')) self.assertNotEmpty(r.result['versions']['values']) for version in r.result['versions']['values']: self.assertValidVersion(version) def assertValidVersionResponse(self, r): self.assertValidVersion(r.result.get('version')) def assertValidEndpointListResponse(self, r): self.assertIsNotNone(r.result.get('endpoints')) self.assertNotEmpty(r.result['endpoints']) for endpoint in r.result['endpoints']: self.assertIsNotNone(endpoint.get('id')) self.assertIsNotNone(endpoint.get('name')) self.assertIsNotNone(endpoint.get('type')) self.assertIsNotNone(endpoint.get('publicURL')) self.assertIsNotNone(endpoint.get('internalURL')) self.assertIsNotNone(endpoint.get('adminURL')) def get_user_from_response(self, r): return r.result.get('user') def get_user_attribute_from_response(self, r, attribute_name): return r.result['user'][attribute_name] def test_service_crud_requires_auth(self): """Service CRUD should return unauthorized without an X-Auth-Token.""" # values here don't matter because it 
will be unauthorized before # they're checked (bug 1006822). service_path = '/v2.0/OS-KSADM/services/%s' % uuid.uuid4().hex service_body = { 'OS-KSADM:service': { 'name': uuid.uuid4().hex, 'type': uuid.uuid4().hex, }, } r = self.admin_request(method='GET', path='/v2.0/OS-KSADM/services', expected_status=http_client.UNAUTHORIZED) self.assertValidErrorResponse(r) r = self.admin_request(method='POST', path='/v2.0/OS-KSADM/services', body=service_body, expected_status=http_client.UNAUTHORIZED) self.assertValidErrorResponse(r) r = self.admin_request(method='GET', path=service_path, expected_status=http_client.UNAUTHORIZED) self.assertValidErrorResponse(r) r = self.admin_request(method='DELETE', path=service_path, expected_status=http_client.UNAUTHORIZED) self.assertValidErrorResponse(r) def test_user_role_list_requires_auth(self): """User role list return unauthorized without an X-Auth-Token.""" # values here don't matter because it will be unauthorized before # they're checked (bug 1006815). path = '/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % { 'tenant_id': uuid.uuid4().hex, 'user_id': uuid.uuid4().hex, } r = self.admin_request(path=path, expected_status=http_client.UNAUTHORIZED) self.assertValidErrorResponse(r) def test_fetch_revocation_list_nonadmin_fails(self): self.admin_request( method='GET', path='/v2.0/tokens/revoked', expected_status=http_client.UNAUTHORIZED) def test_fetch_revocation_list_admin_200(self): token = self.get_scoped_token() r = self.admin_request( method='GET', path='/v2.0/tokens/revoked', token=token, expected_status=http_client.OK) self.assertValidRevocationListResponse(r) def assertValidRevocationListResponse(self, response): self.assertIsNotNone(response.result['signed']) def _fetch_parse_revocation_list(self): token1 = self.get_scoped_token() # TODO(morganfainberg): Because this is making a restful call to the # app a change to UTCNOW via mock.patch will not affect the returned # token. 
The only surefire way to ensure there is not a transient bug # based upon when the second token is issued is with a sleep. This # issue all stems from the limited resolution (no microseconds) on the # expiry time of tokens and the way revocation events utilizes token # expiry to revoke individual tokens. This is a stop-gap until all # associated issues with resolution on expiration and revocation events # are resolved. time.sleep(1) token2 = self.get_scoped_token() self.admin_request(method='DELETE', path='/v2.0/tokens/%s' % token2, token=token1) r = self.admin_request( method='GET', path='/v2.0/tokens/revoked', token=token1, expected_status=http_client.OK) signed_text = r.result['signed'] data_json = cms.cms_verify(signed_text, CONF.signing.certfile, CONF.signing.ca_certs) data = json.loads(data_json) return (data, token2) def test_fetch_revocation_list_md5(self): """Hash for tokens in revocation list and server config should match. If the server is configured for md5, then the revocation list has tokens hashed with MD5. """ # The default hash algorithm is md5. hash_algorithm = 'md5' (data, token) = self._fetch_parse_revocation_list() token_hash = cms.cms_hash_token(token, mode=hash_algorithm) self.assertThat(token_hash, matchers.Equals(data['revoked'][0]['id'])) def test_fetch_revocation_list_sha256(self): """Hash for tokens in revocation list and server config should match. If the server is configured for sha256, then the revocation list has tokens hashed with SHA256. 
""" hash_algorithm = 'sha256' self.config_fixture.config(group='token', hash_algorithm=hash_algorithm) (data, token) = self._fetch_parse_revocation_list() token_hash = cms.cms_hash_token(token, mode=hash_algorithm) self.assertThat(token_hash, matchers.Equals(data['revoked'][0]['id'])) def test_create_update_user_invalid_enabled_type(self): # Enforce usage of boolean for 'enabled' field token = self.get_scoped_token() # Test CREATE request r = self.admin_request( method='POST', path='/v2.0/users', body={ 'user': { 'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex, # In JSON, "true|false" are not boolean 'enabled': "true", }, }, token=token, expected_status=http_client.BAD_REQUEST) self.assertValidErrorResponse(r) # Test UPDATE request r = self.admin_request( method='PUT', path='/v2.0/users/%(user_id)s' % { 'user_id': self.user_foo['id'], }, body={ 'user': { # In JSON, "true|false" are not boolean 'enabled': "true", }, }, token=token, expected_status=http_client.BAD_REQUEST) self.assertValidErrorResponse(r) def test_authenticating_a_user_with_an_OSKSADM_password(self): token = self.get_scoped_token() username = uuid.uuid4().hex password = uuid.uuid4().hex # create the user r = self.admin_request( method='POST', path='/v2.0/users', body={ 'user': { 'name': username, 'OS-KSADM:password': password, 'enabled': True, }, }, token=token) # successfully authenticate self.public_request( method='POST', path='/v2.0/tokens', body={ 'auth': { 'passwordCredentials': { 'username': username, 'password': password, }, }, }, expected_status=http_client.OK) # ensure password doesn't leak user_id = r.result['user']['id'] r = self.admin_request( method='GET', path='/v2.0/users/%s' % user_id, token=token, expected_status=http_client.OK) self.assertNotIn('OS-KSADM:password', r.result['user']) def test_updating_a_user_with_an_OSKSADM_password(self): token = self.get_scoped_token() user_id = self.user_foo['id'] password = uuid.uuid4().hex # update the user self.admin_request( 
method='PUT', path='/v2.0/users/%s/OS-KSADM/password' % user_id, body={ 'user': { 'password': password, }, }, token=token, expected_status=http_client.OK) # successfully authenticate self.public_request( method='POST', path='/v2.0/tokens', body={ 'auth': { 'passwordCredentials': { 'username': self.user_foo['name'], 'password': password, }, }, }, expected_status=http_client.OK) class RevokeApiTestCase(V2TestCase): def config_overrides(self): super(RevokeApiTestCase, self).config_overrides() self.config_fixture.config( group='token', provider='pki', revoke_by_id=False) def test_fetch_revocation_list_admin_200(self): self.skipTest('Revoke API disables revocation_list.') def test_fetch_revocation_list_md5(self): self.skipTest('Revoke API disables revocation_list.') def test_fetch_revocation_list_sha256(self): self.skipTest('Revoke API disables revocation_list.') class TestFernetTokenProviderV2(RestfulTestCase): def setUp(self): super(TestFernetTokenProviderV2, self).setUp() self.useFixture(ksfixtures.KeyRepository(self.config_fixture)) # Add catalog data self.region = unit.new_region_ref() self.region_id = self.region['id'] self.catalog_api.create_region(self.region) self.service = unit.new_service_ref() self.service_id = self.service['id'] self.catalog_api.create_service(self.service_id, self.service) self.endpoint = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id) self.endpoint_id = self.endpoint['id'] self.catalog_api.create_endpoint(self.endpoint_id, self.endpoint) def assertValidUnscopedTokenResponse(self, r): v2.unscoped_validator.validate(r.json['access']) def assertValidScopedTokenResponse(self, r): v2.scoped_validator.validate(r.json['access']) # Used by RestfulTestCase def _get_token_id(self, r): return r.result['access']['token']['id'] def new_project_ref(self): return {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'domain_id': 'default', 'enabled': True} def 
config_overrides(self): super(TestFernetTokenProviderV2, self).config_overrides() self.config_fixture.config(group='token', provider='fernet') def test_authenticate_unscoped_token(self): unscoped_token = self.get_unscoped_token() # Fernet token must be of length 255 per usability requirements self.assertLess(len(unscoped_token), 255) def test_validate_unscoped_token(self): # Grab an admin token to validate with project_ref = self.new_project_ref() self.resource_api.create_project(project_ref['id'], project_ref) self.assignment_api.add_role_to_user_and_project(self.user_foo['id'], project_ref['id'], self.role_admin['id']) admin_token = self.get_scoped_token(tenant_id=project_ref['id']) unscoped_token = self.get_unscoped_token() path = ('/v2.0/tokens/%s' % unscoped_token) resp = self.admin_request( method='GET', path=path, token=admin_token, expected_status=http_client.OK) self.assertValidUnscopedTokenResponse(resp) def test_authenticate_scoped_token(self): project_ref = self.new_project_ref() self.resource_api.create_project(project_ref['id'], project_ref) self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], project_ref['id'], self.role_service['id']) token = self.get_scoped_token(tenant_id=project_ref['id']) # Fernet token must be of length 255 per usability requirements self.assertLess(len(token), 255) def test_validate_scoped_token(self): project_ref = self.new_project_ref() self.resource_api.create_project(project_ref['id'], project_ref) self.assignment_api.add_role_to_user_and_project(self.user_foo['id'], project_ref['id'], self.role_admin['id']) project2_ref = self.new_project_ref() self.resource_api.create_project(project2_ref['id'], project2_ref) self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], project2_ref['id'], self.role_member['id']) admin_token = self.get_scoped_token(tenant_id=project_ref['id']) member_token = self.get_scoped_token(tenant_id=project2_ref['id']) path = ('/v2.0/tokens/%s?belongsTo=%s' % 
(member_token, project2_ref['id'])) # Validate token belongs to project resp = self.admin_request( method='GET', path=path, token=admin_token, expected_status=http_client.OK) self.assertValidScopedTokenResponse(resp) def test_token_authentication_and_validation(self): """Test token authentication for Fernet token provider. Verify that token authentication returns validate response code and valid token belongs to project. """ project_ref = self.new_project_ref() self.resource_api.create_project(project_ref['id'], project_ref) unscoped_token = self.get_unscoped_token() self.assignment_api.add_role_to_user_and_project(self.user_foo['id'], project_ref['id'], self.role_admin['id']) r = self.public_request( method='POST', path='/v2.0/tokens', body={ 'auth': { 'tenantName': project_ref['name'], 'token': { 'id': unscoped_token.encode('ascii') } } }, expected_status=http_client.OK) token_id = self._get_token_id(r) path = ('/v2.0/tokens/%s?belongsTo=%s' % (token_id, project_ref['id'])) # Validate token belongs to project resp = self.admin_request( method='GET', path=path, token=self.get_admin_token(), expected_status=http_client.OK) self.assertValidScopedTokenResponse(resp) def test_rescoped_tokens_maintain_original_expiration(self): project_ref = self.new_project_ref() self.resource_api.create_project(project_ref['id'], project_ref) self.assignment_api.add_role_to_user_and_project(self.user_foo['id'], project_ref['id'], self.role_admin['id']) resp = self.public_request( method='POST', path='/v2.0/tokens', body={ 'auth': { 'tenantName': project_ref['name'], 'passwordCredentials': { 'username': self.user_foo['name'], 'password': self.user_foo['password'] } } }, # NOTE(lbragstad): This test may need to be refactored if Keystone # decides to disallow rescoping using a scoped token. 
expected_status=http_client.OK) original_token = resp.result['access']['token']['id'] original_expiration = resp.result['access']['token']['expires'] resp = self.public_request( method='POST', path='/v2.0/tokens', body={ 'auth': { 'tenantName': project_ref['name'], 'token': { 'id': original_token, } } }, expected_status=http_client.OK) rescoped_token = resp.result['access']['token']['id'] rescoped_expiration = resp.result['access']['token']['expires'] self.assertNotEqual(original_token, rescoped_token) self.assertEqual(original_expiration, rescoped_expiration) self.assertValidScopedTokenResponse(resp) keystone-9.0.0/keystone/tests/unit/test_driver_hints.py0000664000567000056710000000435312701407102024614 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Unit tests for keystone.common.driver_hints.Hints, the container used to
# pass list filters and limits down to backend drivers.

from keystone.common import driver_hints
from keystone.tests.unit import core as test


class ListHintsTests(test.TestCase):
    """Exercise filter creation, iteration/removal, and limit handling."""

    def test_create_iterate_satisfy(self):
        # Two filters added -> both recorded.
        hints = driver_hints.Hints()
        hints.add_filter('t1', 'data1')
        hints.add_filter('t2', 'data2')
        self.assertEqual(2, len(hints.filters))
        # add_filter() defaults: comparator 'equals', case_sensitive False.
        filter = hints.get_exact_filter_by_name('t1')
        self.assertEqual('t1', filter['name'])
        self.assertEqual('data1', filter['value'])
        self.assertEqual('equals', filter['comparator'])
        self.assertFalse(filter['case_sensitive'])
        # Removing a satisfied filter leaves only the other one behind.
        hints.filters.remove(filter)
        filter_count = 0
        for filter in hints.filters:
            filter_count += 1
            self.assertEqual('t2', filter['name'])
        self.assertEqual(1, filter_count)

    def test_multiple_creates(self):
        # Independent Hints instances must not share filter state.
        hints = driver_hints.Hints()
        hints.add_filter('t1', 'data1')
        hints.add_filter('t2', 'data2')
        self.assertEqual(2, len(hints.filters))
        hints2 = driver_hints.Hints()
        hints2.add_filter('t4', 'data1')
        hints2.add_filter('t5', 'data2')
        self.assertEqual(2, len(hints.filters))

    def test_limits(self):
        # limit is unset until set_limit() is called; 'truncated' defaults to
        # False and each set_limit() call overwrites the previous value.
        hints = driver_hints.Hints()
        self.assertIsNone(hints.limit)
        hints.set_limit(10)
        self.assertEqual(10, hints.limit['limit'])
        self.assertFalse(hints.limit['truncated'])
        hints.set_limit(11)
        self.assertEqual(11, hints.limit['limit'])
        self.assertFalse(hints.limit['truncated'])
        hints.set_limit(10, truncated=True)
        self.assertEqual(10, hints.limit['limit'])
        self.assertTrue(hints.limit['truncated'])
keystone-9.0.0/keystone/tests/unit/test_wsgi.py0000664000567000056710000005655712701407102023072 0ustar jenkinsjenkins00000000000000# encoding: utf-8
#
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gettext import socket import uuid import eventlet import mock import oslo_i18n from oslo_serialization import jsonutils import six from six.moves import http_client from testtools import matchers import webob from keystone.common import environment from keystone.common import wsgi from keystone import exception from keystone.tests import unit class FakeApp(wsgi.Application): def index(self, context): return {'a': 'b'} class FakeAttributeCheckerApp(wsgi.Application): def index(self, context): return context['query_string'] def assert_attribute(self, body, attr): """Asserts that the given request has a certain attribute.""" ref = jsonutils.loads(body) self._require_attribute(ref, attr) def assert_attributes(self, body, attr): """Asserts that the given request has a certain set attributes.""" ref = jsonutils.loads(body) self._require_attributes(ref, attr) class RouterTest(unit.TestCase): def setUp(self): self.router = wsgi.RoutersBase() super(RouterTest, self).setUp() def test_invalid_status(self): fake_mapper = uuid.uuid4().hex fake_controller = uuid.uuid4().hex fake_path = uuid.uuid4().hex fake_rel = uuid.uuid4().hex self.assertRaises(exception.Error, self.router._add_resource, fake_mapper, fake_controller, fake_path, fake_rel, status=uuid.uuid4().hex) class BaseWSGITest(unit.TestCase): def setUp(self): self.app = FakeApp() super(BaseWSGITest, self).setUp() def _make_request(self, url='/'): req = webob.Request.blank(url) args = {'action': 'index', 'controller': None} req.environ['wsgiorg.routing_args'] = [None, args] return req class 
ApplicationTest(BaseWSGITest): def test_response_content_type(self): req = self._make_request() resp = req.get_response(self.app) self.assertEqual('application/json', resp.content_type) def test_query_string_available(self): class FakeApp(wsgi.Application): def index(self, context): return context['query_string'] req = self._make_request(url='/?1=2') resp = req.get_response(FakeApp()) self.assertEqual({'1': '2'}, jsonutils.loads(resp.body)) def test_headers_available(self): class FakeApp(wsgi.Application): def index(self, context): return context['headers'] app = FakeApp() req = self._make_request(url='/?1=2') req.headers['X-Foo'] = "bar" resp = req.get_response(app) self.assertIn('X-Foo', eval(resp.body)) def test_render_response(self): data = {'attribute': 'value'} body = b'{"attribute": "value"}' resp = wsgi.render_response(body=data) self.assertEqual('200 OK', resp.status) self.assertEqual(http_client.OK, resp.status_int) self.assertEqual(body, resp.body) self.assertEqual('X-Auth-Token', resp.headers.get('Vary')) self.assertEqual(str(len(body)), resp.headers.get('Content-Length')) def test_render_response_custom_status(self): resp = wsgi.render_response( status=(http_client.NOT_IMPLEMENTED, 'Not Implemented')) self.assertEqual('501 Not Implemented', resp.status) self.assertEqual(http_client.NOT_IMPLEMENTED, resp.status_int) def test_successful_require_attribute(self): app = FakeAttributeCheckerApp() req = self._make_request(url='/?1=2') resp = req.get_response(app) app.assert_attribute(resp.body, '1') def test_require_attribute_fail_if_attribute_not_present(self): app = FakeAttributeCheckerApp() req = self._make_request(url='/?1=2') resp = req.get_response(app) self.assertRaises(exception.ValidationError, app.assert_attribute, resp.body, 'a') def test_successful_require_multiple_attributes(self): app = FakeAttributeCheckerApp() req = self._make_request(url='/?a=1&b=2') resp = req.get_response(app) app.assert_attributes(resp.body, ['a', 'b']) def 
test_attribute_missing_from_request(self): app = FakeAttributeCheckerApp() req = self._make_request(url='/?a=1&b=2') resp = req.get_response(app) ex = self.assertRaises(exception.ValidationError, app.assert_attributes, resp.body, ['a', 'missing_attribute']) self.assertThat(six.text_type(ex), matchers.Contains('missing_attribute')) def test_no_required_attributes_present(self): app = FakeAttributeCheckerApp() req = self._make_request(url='/') resp = req.get_response(app) ex = self.assertRaises(exception.ValidationError, app.assert_attributes, resp.body, ['missing_attribute1', 'missing_attribute2']) self.assertThat(six.text_type(ex), matchers.Contains('missing_attribute1')) self.assertThat(six.text_type(ex), matchers.Contains('missing_attribute2')) def test_render_response_custom_headers(self): resp = wsgi.render_response(headers=[('Custom-Header', 'Some-Value')]) self.assertEqual('Some-Value', resp.headers.get('Custom-Header')) self.assertEqual('X-Auth-Token', resp.headers.get('Vary')) def test_render_response_non_str_headers_converted(self): resp = wsgi.render_response( headers=[('Byte-Header', 'Byte-Value'), (u'Unicode-Header', u'Unicode-Value')]) # assert that all headers are identified. self.assertThat(resp.headers, matchers.HasLength(4)) self.assertEqual('Unicode-Value', resp.headers.get('Unicode-Header')) # assert that unicode value is converted, the expected type is str # on both python2 and python3. 
self.assertEqual(str, type(resp.headers.get('Unicode-Header'))) def test_render_response_no_body(self): resp = wsgi.render_response() self.assertEqual('204 No Content', resp.status) self.assertEqual(http_client.NO_CONTENT, resp.status_int) self.assertEqual(b'', resp.body) self.assertEqual('0', resp.headers.get('Content-Length')) self.assertIsNone(resp.headers.get('Content-Type')) def test_render_response_head_with_body(self): resp = wsgi.render_response({'id': uuid.uuid4().hex}, method='HEAD') self.assertEqual(http_client.OK, resp.status_int) self.assertEqual(b'', resp.body) self.assertNotEqual('0', resp.headers.get('Content-Length')) self.assertEqual('application/json', resp.headers.get('Content-Type')) def test_application_local_config(self): class FakeApp(wsgi.Application): def __init__(self, *args, **kwargs): self.kwargs = kwargs app = FakeApp.factory({}, testkey="test") self.assertIn("testkey", app.kwargs) self.assertEqual("test", app.kwargs["testkey"]) def test_render_exception(self): e = exception.Unauthorized(message=u'\u7f51\u7edc') resp = wsgi.render_exception(e) self.assertEqual(http_client.UNAUTHORIZED, resp.status_int) def test_render_exception_host(self): e = exception.Unauthorized(message=u'\u7f51\u7edc') req = self._make_request(url='/') context = {'host_url': 'http://%s:5000' % uuid.uuid4().hex, 'environment': req.environ} resp = wsgi.render_exception(e, context=context) self.assertEqual(http_client.UNAUTHORIZED, resp.status_int) def test_improperly_encoded_params(self): class FakeApp(wsgi.Application): def index(self, context): return context['query_string'] # this is high bit set ASCII, copy & pasted from Windows. # aka code page 1252. It is not valid UTF8. 
req = self._make_request(url='/?name=nonexit%E8nt') self.assertRaises(exception.ValidationError, req.get_response, FakeApp()) def test_properly_encoded_params(self): class FakeApp(wsgi.Application): def index(self, context): return context['query_string'] # nonexitènt encoded as UTF-8 req = self._make_request(url='/?name=nonexit%C3%A8nt') resp = req.get_response(FakeApp()) self.assertEqual({'name': u'nonexit\xe8nt'}, jsonutils.loads(resp.body)) def test_base_url(self): class FakeApp(wsgi.Application): def index(self, context): return self.base_url(context, 'public') req = self._make_request(url='/') # NOTE(gyee): according to wsgiref, if HTTP_HOST is present in the # request environment, it will be used to construct the base url. # SERVER_NAME and SERVER_PORT will be ignored. These are standard # WSGI environment variables populated by the webserver. req.environ.update({ 'SCRIPT_NAME': '/identity', 'SERVER_NAME': '1.2.3.4', 'wsgi.url_scheme': 'http', 'SERVER_PORT': '80', 'HTTP_HOST': '1.2.3.4', }) resp = req.get_response(FakeApp()) self.assertEqual(b"http://1.2.3.4/identity", resp.body) # if HTTP_HOST is absent, SERVER_NAME and SERVER_PORT will be used req = self._make_request(url='/') del req.environ['HTTP_HOST'] req.environ.update({ 'SCRIPT_NAME': '/identity', 'SERVER_NAME': '1.1.1.1', 'wsgi.url_scheme': 'http', 'SERVER_PORT': '1234', }) resp = req.get_response(FakeApp()) self.assertEqual(b"http://1.1.1.1:1234/identity", resp.body) # make sure keystone normalize the standard HTTP port 80 by stripping # it req = self._make_request(url='/') req.environ.update({'HTTP_HOST': 'foo:80', 'SCRIPT_NAME': '/identity'}) resp = req.get_response(FakeApp()) self.assertEqual(b"http://foo/identity", resp.body) # make sure keystone normalize the standard HTTPS port 443 by stripping # it req = self._make_request(url='/') req.environ.update({'HTTP_HOST': 'foo:443', 'SCRIPT_NAME': '/identity', 'wsgi.url_scheme': 'https'}) resp = req.get_response(FakeApp()) 
self.assertEqual(b"https://foo/identity", resp.body) # make sure non-standard port is preserved req = self._make_request(url='/') req.environ.update({'HTTP_HOST': 'foo:1234', 'SCRIPT_NAME': '/identity'}) resp = req.get_response(FakeApp()) self.assertEqual(b"http://foo:1234/identity", resp.body) # make sure version portion of the SCRIPT_NAME, '/v2.0', is stripped # from base url req = self._make_request(url='/') req.environ.update({'HTTP_HOST': 'foo:80', 'SCRIPT_NAME': '/bar/identity/v2.0'}) resp = req.get_response(FakeApp()) self.assertEqual(b"http://foo/bar/identity", resp.body) # make sure version portion of the SCRIPT_NAME, '/v3' is stripped from # base url req = self._make_request(url='/') req.environ.update({'HTTP_HOST': 'foo:80', 'SCRIPT_NAME': '/identity/v3'}) resp = req.get_response(FakeApp()) self.assertEqual(b"http://foo/identity", resp.body) class ExtensionRouterTest(BaseWSGITest): def test_extensionrouter_local_config(self): class FakeRouter(wsgi.ExtensionRouter): def __init__(self, *args, **kwargs): self.kwargs = kwargs factory = FakeRouter.factory({}, testkey="test") app = factory(self.app) self.assertIn("testkey", app.kwargs) self.assertEqual("test", app.kwargs["testkey"]) class MiddlewareTest(BaseWSGITest): def test_middleware_request(self): class FakeMiddleware(wsgi.Middleware): def process_request(self, req): req.environ['fake_request'] = True return req req = self._make_request() resp = FakeMiddleware(None)(req) self.assertIn('fake_request', resp.environ) def test_middleware_response(self): class FakeMiddleware(wsgi.Middleware): def process_response(self, request, response): response.environ = {} response.environ['fake_response'] = True return response req = self._make_request() resp = FakeMiddleware(self.app)(req) self.assertIn('fake_response', resp.environ) def test_middleware_bad_request(self): class FakeMiddleware(wsgi.Middleware): def process_response(self, request, response): raise exception.Unauthorized() req = self._make_request() 
req.environ['REMOTE_ADDR'] = '127.0.0.1' resp = FakeMiddleware(self.app)(req) self.assertEqual(exception.Unauthorized.code, resp.status_int) def test_middleware_type_error(self): class FakeMiddleware(wsgi.Middleware): def process_response(self, request, response): raise TypeError() req = self._make_request() req.environ['REMOTE_ADDR'] = '127.0.0.1' resp = FakeMiddleware(self.app)(req) # This is a validationerror type self.assertEqual(exception.ValidationError.code, resp.status_int) def test_middleware_exception_error(self): exception_str = b'EXCEPTIONERROR' class FakeMiddleware(wsgi.Middleware): def process_response(self, request, response): raise exception.UnexpectedError(exception_str) def do_request(): req = self._make_request() resp = FakeMiddleware(self.app)(req) self.assertEqual(exception.UnexpectedError.code, resp.status_int) return resp # Exception data should not be in the message when insecure_debug is # False self.config_fixture.config(debug=False, insecure_debug=False) self.assertNotIn(exception_str, do_request().body) # Exception data should be in the message when insecure_debug is True self.config_fixture.config(debug=True, insecure_debug=True) self.assertIn(exception_str, do_request().body) class LocalizedResponseTest(unit.TestCase): def test_request_match_default(self): # The default language if no Accept-Language is provided is None req = webob.Request.blank('/') self.assertIsNone(wsgi.best_match_language(req)) @mock.patch.object(oslo_i18n, 'get_available_languages') def test_request_match_language_expected(self, mock_gal): # If Accept-Language is a supported language, best_match_language() # returns it. 
language = uuid.uuid4().hex mock_gal.return_value = [language] req = webob.Request.blank('/', headers={'Accept-Language': language}) self.assertEqual(language, wsgi.best_match_language(req)) @mock.patch.object(oslo_i18n, 'get_available_languages') def test_request_match_language_unexpected(self, mock_gal): # If Accept-Language is a language we do not support, # best_match_language() returns None. supported_language = uuid.uuid4().hex mock_gal.return_value = [supported_language] request_language = uuid.uuid4().hex req = webob.Request.blank( '/', headers={'Accept-Language': request_language}) self.assertIsNone(wsgi.best_match_language(req)) def test_static_translated_string_is_lazy_translatable(self): # Statically created message strings are an object that can get # lazy-translated rather than a regular string. self.assertNotEqual(six.text_type, type(exception.Unauthorized.message_format)) @mock.patch.object(oslo_i18n, 'get_available_languages') def test_get_localized_response(self, mock_gal): # If the request has the Accept-Language set to a supported language # and an exception is raised by the application that is translatable # then the response will have the translated message. language = uuid.uuid4().hex mock_gal.return_value = [language] # The arguments for the xlated message format have to match the args # for the chosen exception (exception.NotFound) xlated_msg_fmt = "Xlated NotFound, %(target)s." # Fake out gettext.translation() to return a translator for our # expected language and a passthrough translator for other langs. 
def fake_translation(*args, **kwargs): class IdentityTranslator(object): def ugettext(self, msgid): return msgid gettext = ugettext class LangTranslator(object): def ugettext(self, msgid): if msgid == exception.NotFound.message_format: return xlated_msg_fmt return msgid gettext = ugettext if language in kwargs.get('languages', []): return LangTranslator() return IdentityTranslator() with mock.patch.object(gettext, 'translation', side_effect=fake_translation) as xlation_mock: target = uuid.uuid4().hex # Fake app raises NotFound exception to simulate Keystone raising. class FakeApp(wsgi.Application): def index(self, context): raise exception.NotFound(target=target) # Make the request with Accept-Language on the app, expect an error # response with the translated message. req = webob.Request.blank('/') args = {'action': 'index', 'controller': None} req.environ['wsgiorg.routing_args'] = [None, args] req.headers['Accept-Language'] = language resp = req.get_response(FakeApp()) # Assert that the translated message appears in the response. 
exp_msg = xlated_msg_fmt % dict(target=target) self.assertThat(resp.json['error']['message'], matchers.Equals(exp_msg)) self.assertThat(xlation_mock.called, matchers.Equals(True)) class ServerTest(unit.TestCase): def setUp(self): super(ServerTest, self).setUp() self.host = '127.0.0.1' self.port = '1234' @mock.patch('eventlet.listen') @mock.patch('socket.getaddrinfo') def test_keepalive_unset(self, mock_getaddrinfo, mock_listen): mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)] mock_sock_dup = mock_listen.return_value.dup.return_value server = environment.Server(mock.MagicMock(), host=self.host, port=self.port) server.start() self.addCleanup(server.stop) self.assertTrue(mock_listen.called) self.assertFalse(mock_sock_dup.setsockopt.called) @mock.patch('eventlet.listen') @mock.patch('socket.getaddrinfo') def test_keepalive_set(self, mock_getaddrinfo, mock_listen): mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)] mock_sock_dup = mock_listen.return_value.dup.return_value server = environment.Server(mock.MagicMock(), host=self.host, port=self.port, keepalive=True) server.start() self.addCleanup(server.stop) mock_sock_dup.setsockopt.assert_called_once_with(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) self.assertTrue(mock_listen.called) @mock.patch('eventlet.listen') @mock.patch('socket.getaddrinfo') def test_keepalive_and_keepidle_set(self, mock_getaddrinfo, mock_listen): mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)] mock_sock_dup = mock_listen.return_value.dup.return_value server = environment.Server(mock.MagicMock(), host=self.host, port=self.port, keepalive=True, keepidle=1) server.start() self.addCleanup(server.stop) if hasattr(socket, 'TCP_KEEPIDLE'): self.assertEqual(2, mock_sock_dup.setsockopt.call_count) # Test the last set of call args i.e. 
for the keepidle mock_sock_dup.setsockopt.assert_called_with(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 1) else: self.assertEqual(1, mock_sock_dup.setsockopt.call_count) self.assertTrue(mock_listen.called) def test_client_socket_timeout(self): # mocking server method of eventlet.wsgi to check it is called with # configured 'client_socket_timeout' value. for socket_timeout in range(1, 10): self.config_fixture.config(group='eventlet_server', client_socket_timeout=socket_timeout) server = environment.Server(mock.MagicMock(), host=self.host, port=self.port) with mock.patch.object(eventlet.wsgi, 'server') as mock_server: fake_application = uuid.uuid4().hex fake_socket = uuid.uuid4().hex server._run(fake_application, fake_socket) mock_server.assert_called_once_with( fake_socket, fake_application, debug=mock.ANY, socket_timeout=socket_timeout, log=mock.ANY, keepalive=mock.ANY) def test_wsgi_keep_alive(self): # mocking server method of eventlet.wsgi to check it is called with # configured 'wsgi_keep_alive' value. 
wsgi_keepalive = False self.config_fixture.config(group='eventlet_server', wsgi_keep_alive=wsgi_keepalive) server = environment.Server(mock.MagicMock(), host=self.host, port=self.port) with mock.patch.object(eventlet.wsgi, 'server') as mock_server: fake_application = uuid.uuid4().hex fake_socket = uuid.uuid4().hex server._run(fake_application, fake_socket) mock_server.assert_called_once_with(fake_socket, fake_application, debug=mock.ANY, socket_timeout=mock.ANY, log=mock.ANY, keepalive=wsgi_keepalive) keystone-9.0.0/keystone/tests/unit/policy/0000775000567000056710000000000012701407246022006 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/policy/__init__.py0000664000567000056710000000000012701407102024074 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/policy/test_backends.py0000664000567000056710000000575712701407102025176 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid

from keystone import exception
from keystone.tests import unit


class PolicyTests(object):
    """Backend-agnostic CRUD tests for the policy API.

    NOTE(review): this is a mixin -- it relies on the test harness supplying
    ``self.policy_api`` and the unittest assertion methods; it is combined
    with a concrete TestCase elsewhere.
    """

    def test_create(self):
        # create_policy() returns the stored reference unchanged.
        ref = unit.new_policy_ref()
        res = self.policy_api.create_policy(ref['id'], ref)
        self.assertDictEqual(ref, res)

    def test_get(self):
        # A created policy can be fetched back intact by id.
        ref = unit.new_policy_ref()
        res = self.policy_api.create_policy(ref['id'], ref)
        res = self.policy_api.get_policy(ref['id'])
        self.assertDictEqual(ref, res)

    def test_list(self):
        # The created policy must appear, intact, in list_policies().
        ref = unit.new_policy_ref()
        self.policy_api.create_policy(ref['id'], ref)
        res = self.policy_api.list_policies()
        res = [x for x in res if x['id'] == ref['id']][0]
        self.assertDictEqual(ref, res)

    def test_update(self):
        ref = unit.new_policy_ref()
        self.policy_api.create_policy(ref['id'], ref)
        orig = ref
        ref = unit.new_policy_ref()
        # (cannot change policy ID)
        self.assertRaises(exception.ValidationError,
                          self.policy_api.update_policy,
                          orig['id'],
                          ref)
        ref['id'] = orig['id']
        res = self.policy_api.update_policy(orig['id'], ref)
        self.assertDictEqual(ref, res)

    def test_delete(self):
        ref = unit.new_policy_ref()
        self.policy_api.create_policy(ref['id'], ref)
        self.policy_api.delete_policy(ref['id'])
        # Any further operation on the deleted id must raise PolicyNotFound,
        # and the policy must no longer be listed.
        self.assertRaises(exception.PolicyNotFound,
                          self.policy_api.delete_policy,
                          ref['id'])
        self.assertRaises(exception.PolicyNotFound,
                          self.policy_api.get_policy,
                          ref['id'])
        res = self.policy_api.list_policies()
        self.assertFalse(len([x for x in res if x['id'] == ref['id']]))

    def test_get_policy_returns_not_found(self):
        # Fetching a nonexistent id raises PolicyNotFound.
        self.assertRaises(exception.PolicyNotFound,
                          self.policy_api.get_policy,
                          uuid.uuid4().hex)

    def test_update_policy_returns_not_found(self):
        # Updating a policy that was never created raises PolicyNotFound.
        ref = unit.new_policy_ref()
        self.assertRaises(exception.PolicyNotFound,
                          self.policy_api.update_policy,
                          ref['id'],
                          ref)

    def test_delete_policy_returns_not_found(self):
        # Deleting a nonexistent id raises PolicyNotFound.
        self.assertRaises(exception.PolicyNotFound,
                          self.policy_api.delete_policy,
                          uuid.uuid4().hex)
keystone-9.0.0/keystone/tests/unit/test_v2_controller.py0000664000567000056710000001577312701407102024716 0ustar jenkinsjenkins00000000000000# Copyright 2014
IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import uuid

from testtools import matchers

from keystone.assignment import controllers as assignment_controllers
from keystone import exception
from keystone.resource import controllers as resource_controllers
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database

# Minimal admin context accepted by the v2 controllers under test.
_ADMIN_CONTEXT = {'is_admin': True, 'query_string': {}}


class TenantTestCase(unit.TestCase):
    """Tests for the V2 Tenant controller.

    These tests exercise :class:`keystone.assignment.controllers.Tenant`.
    """

    def setUp(self):
        # Stand up an in-memory database, load the default fixture data, and
        # instantiate the three v2 controllers exercised below.
        super(TenantTestCase, self).setUp()
        self.useFixture(database.Database())
        self.load_backends()
        self.load_fixtures(default_fixtures)
        self.tenant_controller = resource_controllers.Tenant()
        self.assignment_tenant_controller = (
            assignment_controllers.TenantAssignment())
        self.assignment_role_controller = (
            assignment_controllers.RoleAssignmentV2())

    def test_get_project_users_no_user(self):
        """get_project_users when user doesn't exist.

        When a user that's not known to `identity` has a role on a project,
        then `get_project_users` just skips that user.

        """
        project_id = self.tenant_bar['id']
        orig_project_users = (
            self.assignment_tenant_controller.get_project_users(_ADMIN_CONTEXT,
                                                                project_id))
        # Assign a role to a user that doesn't exist to the `bar` project.
        user_id = uuid.uuid4().hex
        self.assignment_role_controller.add_role_to_user(
            _ADMIN_CONTEXT, user_id, self.role_other['id'], project_id)
        new_project_users = (
            self.assignment_tenant_controller.get_project_users(_ADMIN_CONTEXT,
                                                                project_id))
        # The new user isn't included in the result, so no change.
        # asserting that the expected values appear in the list,
        # without asserting the order of the results
        self.assertEqual(sorted(orig_project_users),
                         sorted(new_project_users))

    def test_list_projects_default_domain(self):
        """Test that list projects only returns those in the default domain."""
        domain = unit.new_domain_ref()
        self.resource_api.create_domain(domain['id'], domain)
        project1 = unit.new_project_ref(domain_id=domain['id'])
        self.resource_api.create_project(project1['id'], project1)
        # Check the real total number of projects, we should have the:
        # - tenants in the default fixtures
        # - the project representing the default domain
        # - the project representing the domain we created above
        # - the project we created above
        refs = self.resource_api.list_projects()
        self.assertThat(
            refs, matchers.HasLength(len(default_fixtures.TENANTS) + 3))
        # Now list all projects using the v2 API - we should only get
        # back those in the default features, since only those are in the
        # default domain.
        refs = self.tenant_controller.get_all_projects(_ADMIN_CONTEXT)
        self.assertEqual(len(default_fixtures.TENANTS), len(refs['tenants']))
        for tenant in default_fixtures.TENANTS:
            # v2 tenants do not carry the v3-only attributes; strip them
            # from the fixture copy before comparing.
            tenant_copy = tenant.copy()
            tenant_copy.pop('domain_id')
            tenant_copy.pop('parent_id')
            tenant_copy.pop('is_domain')
            self.assertIn(tenant_copy, refs['tenants'])

    def _create_is_domain_project(self):
        # Helper: create a project acting as a domain (is_domain=True) and
        # return its v2 representation.
        project = unit.new_project_ref(is_domain=True)
        project_ref = self.resource_api.create_project(project['id'], project)
        return self.tenant_controller.v3_to_v2_project(project_ref)

    def test_get_is_domain_project_not_found(self):
        """Test that get project does not return is_domain projects."""
        project = self._create_is_domain_project()
        # Lookup by name must not find the acting-as-domain project...
        context = copy.deepcopy(_ADMIN_CONTEXT)
        context['query_string']['name'] = project['name']
        self.assertRaises(
            exception.ProjectNotFound,
            self.tenant_controller.get_all_projects,
            context)
        # ...nor must lookup by id (passed via the name query parameter).
        context = copy.deepcopy(_ADMIN_CONTEXT)
        context['query_string']['name'] = project['id']
        self.assertRaises(
            exception.ProjectNotFound,
            self.tenant_controller.get_all_projects,
            context)

    def test_create_is_domain_project_fails(self):
        """Test that the creation of a project acting as a domain fails."""
        project = {'name': uuid.uuid4().hex, 'domain_id': 'default',
                   'is_domain': True}
        self.assertRaises(
            exception.ValidationError,
            self.tenant_controller.create_project,
            _ADMIN_CONTEXT,
            project)

    def test_create_project_passing_is_domain_false_fails(self):
        """Test that passing is_domain=False is not allowed."""
        project = {'name': uuid.uuid4().hex, 'domain_id': 'default',
                   'is_domain': False}
        self.assertRaises(
            exception.ValidationError,
            self.tenant_controller.create_project,
            _ADMIN_CONTEXT,
            project)

    def test_update_is_domain_project_not_found(self):
        """Test that update is_domain project is not allowed in v2."""
        project = self._create_is_domain_project()
        project['name'] = uuid.uuid4().hex
        self.assertRaises(
            exception.ProjectNotFound,
            self.tenant_controller.update_project,
            _ADMIN_CONTEXT,
            project['id'],
            project)

    def test_delete_is_domain_project_not_found(self):
        """Test that delete is_domain project is not allowed in v2."""
        project = self._create_is_domain_project()
        self.assertRaises(
            exception.ProjectNotFound,
            self.tenant_controller.delete_project,
            _ADMIN_CONTEXT,
            project['id'])

    def test_list_is_domain_project_not_found(self):
        """Test v2 get_all_projects having projects that act as a domain.

        In v2 no project with the is_domain flag enabled should be returned.
        """
        project1 = self._create_is_domain_project()
        project2 = self._create_is_domain_project()
        refs = self.tenant_controller.get_all_projects(_ADMIN_CONTEXT)
        projects = refs.get('tenants')
        self.assertNotIn(project1, projects)
        self.assertNotIn(project2, projects)
keystone-9.0.0/keystone/tests/unit/test_ssl.py0000664000567000056710000001627312701407102022721 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os import ssl from oslo_config import cfg from keystone.common import environment from keystone.tests import unit from keystone.tests.unit.ksfixtures import appserver CONF = cfg.CONF CERTDIR = unit.dirs.root('examples', 'pki', 'certs') KEYDIR = unit.dirs.root('examples', 'pki', 'private') CERT = os.path.join(CERTDIR, 'ssl_cert.pem') KEY = os.path.join(KEYDIR, 'ssl_key.pem') CA = os.path.join(CERTDIR, 'cacert.pem') CLIENT = os.path.join(CERTDIR, 'middleware.pem') class SSLTestCase(unit.TestCase): def setUp(self): super(SSLTestCase, self).setUp() raise self.skipTest('SSL Version and Ciphers cannot be configured ' 'with eventlet, some platforms have disabled ' 'SSLv3. See bug 1381365.') # NOTE(morganfainberg): It has been determined that this # will not be fixed. These tests should be re-enabled for the full # functional test suite when run against an SSL terminated # endpoint. Some distributions/environments have patched OpenSSL to # not have SSLv3 at all due to POODLE and this causes differing # behavior depending on platform. See bug 1381365 for more information. 
# NOTE(jamespage): # Deal with more secure certificate chain verification # introduced in python 2.7.9 under PEP-0476 # https://github.com/python/peps/blob/master/pep-0476.txt self.context = None if hasattr(ssl, '_create_unverified_context'): self.context = ssl._create_unverified_context() self.load_backends() def get_HTTPSConnection(self, *args): """Simple helper to configure HTTPSConnection objects.""" if self.context: return environment.httplib.HTTPSConnection( *args, context=self.context ) else: return environment.httplib.HTTPSConnection(*args) def test_1way_ssl_ok(self): """Make sure both public and admin API work with 1-way SSL.""" paste_conf = self._paste_config('keystone') ssl_kwargs = dict(cert=CERT, key=KEY, ca=CA) # Verify Admin with appserver.AppServer(paste_conf, appserver.ADMIN, **ssl_kwargs): conn = self.get_HTTPSConnection( '127.0.0.1', CONF.eventlet_server.admin_port) conn.request('GET', '/') resp = conn.getresponse() self.assertEqual(300, resp.status) # Verify Public with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs): conn = self.get_HTTPSConnection( '127.0.0.1', CONF.eventlet_server.public_port) conn.request('GET', '/') resp = conn.getresponse() self.assertEqual(300, resp.status) def test_2way_ssl_ok(self): """Make sure both public and admin API work with 2-way SSL. Requires client certificate. 
""" paste_conf = self._paste_config('keystone') ssl_kwargs = dict(cert=CERT, key=KEY, ca=CA, cert_required=True) # Verify Admin with appserver.AppServer(paste_conf, appserver.ADMIN, **ssl_kwargs): conn = self.get_HTTPSConnection( '127.0.0.1', CONF.eventlet_server.admin_port, CLIENT, CLIENT) conn.request('GET', '/') resp = conn.getresponse() self.assertEqual(300, resp.status) # Verify Public with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs): conn = self.get_HTTPSConnection( '127.0.0.1', CONF.eventlet_server.public_port, CLIENT, CLIENT) conn.request('GET', '/') resp = conn.getresponse() self.assertEqual(300, resp.status) def test_1way_ssl_with_ipv6_ok(self): """Make sure both public and admin API work with 1-way ipv6 & SSL.""" self.skip_if_no_ipv6() paste_conf = self._paste_config('keystone') ssl_kwargs = dict(cert=CERT, key=KEY, ca=CA, host="::1") # Verify Admin with appserver.AppServer(paste_conf, appserver.ADMIN, **ssl_kwargs): conn = self.get_HTTPSConnection( '::1', CONF.eventlet_server.admin_port) conn.request('GET', '/') resp = conn.getresponse() self.assertEqual(300, resp.status) # Verify Public with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs): conn = self.get_HTTPSConnection( '::1', CONF.eventlet_server.public_port) conn.request('GET', '/') resp = conn.getresponse() self.assertEqual(300, resp.status) def test_2way_ssl_with_ipv6_ok(self): """Make sure both public and admin API work with 2-way ipv6 & SSL. Requires client certificate. 
""" self.skip_if_no_ipv6() paste_conf = self._paste_config('keystone') ssl_kwargs = dict(cert=CERT, key=KEY, ca=CA, cert_required=True, host="::1") # Verify Admin with appserver.AppServer(paste_conf, appserver.ADMIN, **ssl_kwargs): conn = self.get_HTTPSConnection( '::1', CONF.eventlet_server.admin_port, CLIENT, CLIENT) conn.request('GET', '/') resp = conn.getresponse() self.assertEqual(300, resp.status) # Verify Public with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs): conn = self.get_HTTPSConnection( '::1', CONF.eventlet_server.public_port, CLIENT, CLIENT) conn.request('GET', '/') resp = conn.getresponse() self.assertEqual(300, resp.status) def test_2way_ssl_fail(self): """Expect to fail when client does not present proper certificate.""" paste_conf = self._paste_config('keystone') ssl_kwargs = dict(cert=CERT, key=KEY, ca=CA, cert_required=True) # Verify Admin with appserver.AppServer(paste_conf, appserver.ADMIN, **ssl_kwargs): conn = self.get_HTTPSConnection( '127.0.0.1', CONF.eventlet_server.admin_port) try: conn.request('GET', '/') self.fail('Admin API shoulda failed with SSL handshake!') except ssl.SSLError: pass # Verify Public with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs): conn = self.get_HTTPSConnection( '127.0.0.1', CONF.eventlet_server.public_port) try: conn.request('GET', '/') self.fail('Public API shoulda failed with SSL handshake!') except ssl.SSLError: pass keystone-9.0.0/keystone/tests/unit/test_v3_policy.py0000664000567000056710000000424712701407105024030 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import uuid from keystone.tests import unit from keystone.tests.unit import test_v3 class PolicyTestCase(test_v3.RestfulTestCase): """Test policy CRUD.""" def setUp(self): super(PolicyTestCase, self).setUp() self.policy = unit.new_policy_ref() self.policy_id = self.policy['id'] self.policy_api.create_policy( self.policy_id, self.policy.copy()) # policy crud tests def test_create_policy(self): """Call ``POST /policies``.""" ref = unit.new_policy_ref() r = self.post('/policies', body={'policy': ref}) return self.assertValidPolicyResponse(r, ref) def test_list_policies(self): """Call ``GET /policies``.""" r = self.get('/policies') self.assertValidPolicyListResponse(r, ref=self.policy) def test_get_policy(self): """Call ``GET /policies/{policy_id}``.""" r = self.get( '/policies/%(policy_id)s' % {'policy_id': self.policy_id}) self.assertValidPolicyResponse(r, self.policy) def test_update_policy(self): """Call ``PATCH /policies/{policy_id}``.""" self.policy['blob'] = json.dumps({'data': uuid.uuid4().hex, }) r = self.patch( '/policies/%(policy_id)s' % {'policy_id': self.policy_id}, body={'policy': self.policy}) self.assertValidPolicyResponse(r, self.policy) def test_delete_policy(self): """Call ``DELETE /policies/{policy_id}``.""" self.delete( '/policies/%(policy_id)s' % {'policy_id': self.policy_id}) keystone-9.0.0/keystone/tests/unit/test_v3_catalog.py0000664000567000056710000011251512701407105024141 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use 
this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid from six.moves import http_client from testtools import matchers from keystone import catalog from keystone.tests import unit from keystone.tests.unit.ksfixtures import database from keystone.tests.unit import test_v3 class CatalogTestCase(test_v3.RestfulTestCase): """Test service & endpoint CRUD.""" # region crud tests def test_create_region_with_id(self): """Call ``PUT /regions/{region_id}`` w/o an ID in the request body.""" ref = unit.new_region_ref() region_id = ref.pop('id') r = self.put( '/regions/%s' % region_id, body={'region': ref}, expected_status=http_client.CREATED) self.assertValidRegionResponse(r, ref) # Double-check that the region ID was kept as-is and not # populated with a UUID, as is the case with POST /v3/regions self.assertEqual(region_id, r.json['region']['id']) def test_create_region_with_matching_ids(self): """Call ``PUT /regions/{region_id}`` with an ID in the request body.""" ref = unit.new_region_ref() region_id = ref['id'] r = self.put( '/regions/%s' % region_id, body={'region': ref}, expected_status=http_client.CREATED) self.assertValidRegionResponse(r, ref) # Double-check that the region ID was kept as-is and not # populated with a UUID, as is the case with POST /v3/regions self.assertEqual(region_id, r.json['region']['id']) def test_create_region_with_duplicate_id(self): """Call ``PUT /regions/{region_id}``.""" ref = dict(description="my region") self.put( '/regions/myregion', body={'region': ref}, expected_status=http_client.CREATED) # Create region again with 
duplicate id self.put( '/regions/myregion', body={'region': ref}, expected_status=http_client.CONFLICT) def test_create_region(self): """Call ``POST /regions`` with an ID in the request body.""" # the ref will have an ID defined on it ref = unit.new_region_ref() r = self.post( '/regions', body={'region': ref}) self.assertValidRegionResponse(r, ref) # we should be able to get the region, having defined the ID ourselves r = self.get( '/regions/%(region_id)s' % { 'region_id': ref['id']}) self.assertValidRegionResponse(r, ref) def test_create_region_with_empty_id(self): """Call ``POST /regions`` with an empty ID in the request body.""" ref = unit.new_region_ref(id='') r = self.post('/regions', body={'region': ref}) self.assertValidRegionResponse(r, ref) self.assertNotEmpty(r.result['region'].get('id')) def test_create_region_without_id(self): """Call ``POST /regions`` without an ID in the request body.""" ref = unit.new_region_ref() # instead of defining the ID ourselves... del ref['id'] # let the service define the ID r = self.post('/regions', body={'region': ref}) self.assertValidRegionResponse(r, ref) def test_create_region_without_description(self): """Call ``POST /regions`` without description in the request body.""" ref = unit.new_region_ref(description=None) del ref['description'] r = self.post('/regions', body={'region': ref}) # Create the description in the reference to compare to since the # response should now have a description, even though we didn't send # it with the original reference. ref['description'] = '' self.assertValidRegionResponse(r, ref) def test_create_regions_with_same_description_string(self): """Call ``POST /regions`` with duplicate descriptions.""" # NOTE(lbragstad): Make sure we can create two regions that have the # same description. 
region_desc = 'Some Region Description' ref1 = unit.new_region_ref(description=region_desc) ref2 = unit.new_region_ref(description=region_desc) resp1 = self.post('/regions', body={'region': ref1}) self.assertValidRegionResponse(resp1, ref1) resp2 = self.post('/regions', body={'region': ref2}) self.assertValidRegionResponse(resp2, ref2) def test_create_regions_without_descriptions(self): """Call ``POST /regions`` with no description.""" # NOTE(lbragstad): Make sure we can create two regions that have # no description in the request body. The description should be # populated by Catalog Manager. ref1 = unit.new_region_ref() ref2 = unit.new_region_ref() del ref1['description'] ref2['description'] = None resp1 = self.post('/regions', body={'region': ref1}) resp2 = self.post('/regions', body={'region': ref2}) # Create the descriptions in the references to compare to since the # responses should now have descriptions, even though we didn't send # a description with the original references. ref1['description'] = '' ref2['description'] = '' self.assertValidRegionResponse(resp1, ref1) self.assertValidRegionResponse(resp2, ref2) def test_create_region_with_conflicting_ids(self): """Call ``PUT /regions/{region_id}`` with conflicting region IDs.""" # the region ref is created with an ID ref = unit.new_region_ref() # but instead of using that ID, make up a new, conflicting one self.put( '/regions/%s' % uuid.uuid4().hex, body={'region': ref}, expected_status=http_client.BAD_REQUEST) def test_list_regions(self): """Call ``GET /regions``.""" r = self.get('/regions') self.assertValidRegionListResponse(r, ref=self.region) def _create_region_with_parent_id(self, parent_id=None): ref = unit.new_region_ref(parent_region_id=parent_id) return self.post( '/regions', body={'region': ref}) def test_list_regions_filtered_by_parent_region_id(self): """Call ``GET /regions?parent_region_id={parent_region_id}``.""" new_region = self._create_region_with_parent_id() parent_id = 
new_region.result['region']['id'] new_region = self._create_region_with_parent_id(parent_id) new_region = self._create_region_with_parent_id(parent_id) r = self.get('/regions?parent_region_id=%s' % parent_id) for region in r.result['regions']: self.assertEqual(parent_id, region['parent_region_id']) def test_get_region(self): """Call ``GET /regions/{region_id}``.""" r = self.get('/regions/%(region_id)s' % { 'region_id': self.region_id}) self.assertValidRegionResponse(r, self.region) def test_update_region(self): """Call ``PATCH /regions/{region_id}``.""" region = unit.new_region_ref() del region['id'] r = self.patch('/regions/%(region_id)s' % { 'region_id': self.region_id}, body={'region': region}) self.assertValidRegionResponse(r, region) def test_update_region_without_description_keeps_original(self): """Call ``PATCH /regions/{region_id}``.""" region_ref = unit.new_region_ref() resp = self.post('/regions', body={'region': region_ref}) region_updates = { # update with something that's not the description 'parent_region_id': self.region_id, } resp = self.patch('/regions/%s' % region_ref['id'], body={'region': region_updates}) # NOTE(dstanek): Keystone should keep the original description. self.assertEqual(region_ref['description'], resp.result['region']['description']) def test_update_region_with_null_description(self): """Call ``PATCH /regions/{region_id}``.""" region = unit.new_region_ref(description=None) del region['id'] r = self.patch('/regions/%(region_id)s' % { 'region_id': self.region_id}, body={'region': region}) # NOTE(dstanek): Keystone should turn the provided None value into # an empty string before storing in the backend. 
region['description'] = '' self.assertValidRegionResponse(r, region) def test_delete_region(self): """Call ``DELETE /regions/{region_id}``.""" ref = unit.new_region_ref() r = self.post( '/regions', body={'region': ref}) self.assertValidRegionResponse(r, ref) self.delete('/regions/%(region_id)s' % { 'region_id': ref['id']}) # service crud tests def test_create_service(self): """Call ``POST /services``.""" ref = unit.new_service_ref() r = self.post( '/services', body={'service': ref}) self.assertValidServiceResponse(r, ref) def test_create_service_no_name(self): """Call ``POST /services``.""" ref = unit.new_service_ref() del ref['name'] r = self.post( '/services', body={'service': ref}) ref['name'] = '' self.assertValidServiceResponse(r, ref) def test_create_service_no_enabled(self): """Call ``POST /services``.""" ref = unit.new_service_ref() del ref['enabled'] r = self.post( '/services', body={'service': ref}) ref['enabled'] = True self.assertValidServiceResponse(r, ref) self.assertIs(True, r.result['service']['enabled']) def test_create_service_enabled_false(self): """Call ``POST /services``.""" ref = unit.new_service_ref(enabled=False) r = self.post( '/services', body={'service': ref}) self.assertValidServiceResponse(r, ref) self.assertIs(False, r.result['service']['enabled']) def test_create_service_enabled_true(self): """Call ``POST /services``.""" ref = unit.new_service_ref(enabled=True) r = self.post( '/services', body={'service': ref}) self.assertValidServiceResponse(r, ref) self.assertIs(True, r.result['service']['enabled']) def test_create_service_enabled_str_true(self): """Call ``POST /services``.""" ref = unit.new_service_ref(enabled='True') self.post('/services', body={'service': ref}, expected_status=http_client.BAD_REQUEST) def test_create_service_enabled_str_false(self): """Call ``POST /services``.""" ref = unit.new_service_ref(enabled='False') self.post('/services', body={'service': ref}, expected_status=http_client.BAD_REQUEST) def 
test_create_service_enabled_str_random(self): """Call ``POST /services``.""" ref = unit.new_service_ref(enabled='puppies') self.post('/services', body={'service': ref}, expected_status=http_client.BAD_REQUEST) def test_list_services(self): """Call ``GET /services``.""" r = self.get('/services') self.assertValidServiceListResponse(r, ref=self.service) def _create_random_service(self): ref = unit.new_service_ref() response = self.post( '/services', body={'service': ref}) return response.json['service'] def test_filter_list_services_by_type(self): """Call ``GET /services?type=``.""" target_ref = self._create_random_service() # create unrelated services self._create_random_service() self._create_random_service() response = self.get('/services?type=' + target_ref['type']) self.assertValidServiceListResponse(response, ref=target_ref) filtered_service_list = response.json['services'] self.assertEqual(1, len(filtered_service_list)) filtered_service = filtered_service_list[0] self.assertEqual(target_ref['type'], filtered_service['type']) def test_filter_list_services_by_name(self): """Call ``GET /services?name=``.""" target_ref = self._create_random_service() # create unrelated services self._create_random_service() self._create_random_service() response = self.get('/services?name=' + target_ref['name']) self.assertValidServiceListResponse(response, ref=target_ref) filtered_service_list = response.json['services'] self.assertEqual(1, len(filtered_service_list)) filtered_service = filtered_service_list[0] self.assertEqual(target_ref['name'], filtered_service['name']) def test_get_service(self): """Call ``GET /services/{service_id}``.""" r = self.get('/services/%(service_id)s' % { 'service_id': self.service_id}) self.assertValidServiceResponse(r, self.service) def test_update_service(self): """Call ``PATCH /services/{service_id}``.""" service = unit.new_service_ref() del service['id'] r = self.patch('/services/%(service_id)s' % { 'service_id': self.service_id}, 
body={'service': service}) self.assertValidServiceResponse(r, service) def test_delete_service(self): """Call ``DELETE /services/{service_id}``.""" self.delete('/services/%(service_id)s' % { 'service_id': self.service_id}) # endpoint crud tests def test_list_endpoints(self): """Call ``GET /endpoints``.""" r = self.get('/endpoints') self.assertValidEndpointListResponse(r, ref=self.endpoint) def _create_random_endpoint(self, interface='public', parent_region_id=None): region = self._create_region_with_parent_id( parent_id=parent_region_id) service = self._create_random_service() ref = unit.new_endpoint_ref( service_id=service['id'], interface=interface, region_id=region.result['region']['id']) response = self.post( '/endpoints', body={'endpoint': ref}) return response.json['endpoint'] def test_list_endpoints_filtered_by_interface(self): """Call ``GET /endpoints?interface={interface}``.""" ref = self._create_random_endpoint(interface='internal') response = self.get('/endpoints?interface=%s' % ref['interface']) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['interface'], endpoint['interface']) def test_list_endpoints_filtered_by_service_id(self): """Call ``GET /endpoints?service_id={service_id}``.""" ref = self._create_random_endpoint() response = self.get('/endpoints?service_id=%s' % ref['service_id']) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['service_id'], endpoint['service_id']) def test_list_endpoints_filtered_by_region_id(self): """Call ``GET /endpoints?region_id={region_id}``.""" ref = self._create_random_endpoint() response = self.get('/endpoints?region_id=%s' % ref['region_id']) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['region_id'], endpoint['region_id']) def test_list_endpoints_filtered_by_parent_region_id(self): """Call ``GET 
/endpoints?region_id={region_id}``. Ensure passing the parent_region_id as filter returns an empty list. """ parent_region = self._create_region_with_parent_id() parent_region_id = parent_region.result['region']['id'] self._create_random_endpoint(parent_region_id=parent_region_id) response = self.get('/endpoints?region_id=%s' % parent_region_id) self.assertEqual(0, len(response.json['endpoints'])) def test_list_endpoints_with_multiple_filters(self): """Call ``GET /endpoints?interface={interface}...``. Ensure passing different combinations of interface, region_id and service_id as filters will return the correct result. """ # interface and region_id specified ref = self._create_random_endpoint(interface='internal') response = self.get('/endpoints?interface=%s®ion_id=%s' % (ref['interface'], ref['region_id'])) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['interface'], endpoint['interface']) self.assertEqual(ref['region_id'], endpoint['region_id']) # interface and service_id specified ref = self._create_random_endpoint(interface='internal') response = self.get('/endpoints?interface=%s&service_id=%s' % (ref['interface'], ref['service_id'])) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['interface'], endpoint['interface']) self.assertEqual(ref['service_id'], endpoint['service_id']) # region_id and service_id specified ref = self._create_random_endpoint(interface='internal') response = self.get('/endpoints?region_id=%s&service_id=%s' % (ref['region_id'], ref['service_id'])) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['region_id'], endpoint['region_id']) self.assertEqual(ref['service_id'], endpoint['service_id']) # interface, region_id and service_id specified ref = self._create_random_endpoint(interface='internal') response = 
self.get(('/endpoints?interface=%s®ion_id=%s' '&service_id=%s') % (ref['interface'], ref['region_id'], ref['service_id'])) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['interface'], endpoint['interface']) self.assertEqual(ref['region_id'], endpoint['region_id']) self.assertEqual(ref['service_id'], endpoint['service_id']) def test_list_endpoints_with_random_filter_values(self): """Call ``GET /endpoints?interface={interface}...``. Ensure passing random values for: interface, region_id and service_id will return an empty list. """ self._create_random_endpoint(interface='internal') response = self.get('/endpoints?interface=%s' % uuid.uuid4().hex) self.assertEqual(0, len(response.json['endpoints'])) response = self.get('/endpoints?region_id=%s' % uuid.uuid4().hex) self.assertEqual(0, len(response.json['endpoints'])) response = self.get('/endpoints?service_id=%s' % uuid.uuid4().hex) self.assertEqual(0, len(response.json['endpoints'])) def test_create_endpoint_no_enabled(self): """Call ``POST /endpoints``.""" ref = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id) r = self.post('/endpoints', body={'endpoint': ref}) ref['enabled'] = True self.assertValidEndpointResponse(r, ref) def test_create_endpoint_enabled_true(self): """Call ``POST /endpoints`` with enabled: true.""" ref = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id, enabled=True) r = self.post('/endpoints', body={'endpoint': ref}) self.assertValidEndpointResponse(r, ref) def test_create_endpoint_enabled_false(self): """Call ``POST /endpoints`` with enabled: false.""" ref = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id, enabled=False) r = self.post('/endpoints', body={'endpoint': ref}) self.assertValidEndpointResponse(r, ref) def test_create_endpoint_enabled_str_true(self): """Call ``POST 
/endpoints`` with enabled: 'True'.""" ref = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id, enabled='True') self.post('/endpoints', body={'endpoint': ref}, expected_status=http_client.BAD_REQUEST) def test_create_endpoint_enabled_str_false(self): """Call ``POST /endpoints`` with enabled: 'False'.""" ref = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id, enabled='False') self.post('/endpoints', body={'endpoint': ref}, expected_status=http_client.BAD_REQUEST) def test_create_endpoint_enabled_str_random(self): """Call ``POST /endpoints`` with enabled: 'puppies'.""" ref = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id, enabled='puppies') self.post('/endpoints', body={'endpoint': ref}, expected_status=http_client.BAD_REQUEST) def test_create_endpoint_with_invalid_region_id(self): """Call ``POST /endpoints``.""" ref = unit.new_endpoint_ref(service_id=self.service_id) self.post('/endpoints', body={'endpoint': ref}, expected_status=http_client.BAD_REQUEST) def test_create_endpoint_with_region(self): """EndpointV3 creates the region before creating the endpoint. This occurs when endpoint is provided with 'region' and no 'region_id'. 
""" ref = unit.new_endpoint_ref_with_region(service_id=self.service_id, region=uuid.uuid4().hex) self.post('/endpoints', body={'endpoint': ref}) # Make sure the region is created self.get('/regions/%(region_id)s' % {'region_id': ref["region"]}) def test_create_endpoint_with_no_region(self): """EndpointV3 allows to creates the endpoint without region.""" ref = unit.new_endpoint_ref(service_id=self.service_id, region_id=None) del ref['region_id'] # cannot just be None, it needs to not exist self.post('/endpoints', body={'endpoint': ref}) def test_create_endpoint_with_empty_url(self): """Call ``POST /endpoints``.""" ref = unit.new_endpoint_ref(service_id=self.service_id, url='') self.post('/endpoints', body={'endpoint': ref}, expected_status=http_client.BAD_REQUEST) def test_get_endpoint(self): """Call ``GET /endpoints/{endpoint_id}``.""" r = self.get( '/endpoints/%(endpoint_id)s' % { 'endpoint_id': self.endpoint_id}) self.assertValidEndpointResponse(r, self.endpoint) def test_update_endpoint(self): """Call ``PATCH /endpoints/{endpoint_id}``.""" ref = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id) del ref['id'] r = self.patch( '/endpoints/%(endpoint_id)s' % { 'endpoint_id': self.endpoint_id}, body={'endpoint': ref}) ref['enabled'] = True self.assertValidEndpointResponse(r, ref) def test_update_endpoint_enabled_true(self): """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: True.""" r = self.patch( '/endpoints/%(endpoint_id)s' % { 'endpoint_id': self.endpoint_id}, body={'endpoint': {'enabled': True}}) self.assertValidEndpointResponse(r, self.endpoint) def test_update_endpoint_enabled_false(self): """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: False.""" r = self.patch( '/endpoints/%(endpoint_id)s' % { 'endpoint_id': self.endpoint_id}, body={'endpoint': {'enabled': False}}) exp_endpoint = copy.copy(self.endpoint) exp_endpoint['enabled'] = False self.assertValidEndpointResponse(r, exp_endpoint) def 
test_update_endpoint_enabled_str_true(self): """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'True'.""" self.patch( '/endpoints/%(endpoint_id)s' % { 'endpoint_id': self.endpoint_id}, body={'endpoint': {'enabled': 'True'}}, expected_status=http_client.BAD_REQUEST) def test_update_endpoint_enabled_str_false(self): """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'False'.""" self.patch( '/endpoints/%(endpoint_id)s' % { 'endpoint_id': self.endpoint_id}, body={'endpoint': {'enabled': 'False'}}, expected_status=http_client.BAD_REQUEST) def test_update_endpoint_enabled_str_random(self): """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'kitties'.""" self.patch( '/endpoints/%(endpoint_id)s' % { 'endpoint_id': self.endpoint_id}, body={'endpoint': {'enabled': 'kitties'}}, expected_status=http_client.BAD_REQUEST) def test_delete_endpoint(self): """Call ``DELETE /endpoints/{endpoint_id}``.""" self.delete( '/endpoints/%(endpoint_id)s' % { 'endpoint_id': self.endpoint_id}) def test_create_endpoint_on_v2(self): # clear the v3 endpoint so we only have endpoints created on v2 self.delete( '/endpoints/%(endpoint_id)s' % { 'endpoint_id': self.endpoint_id}) # create a v3 endpoint ref, and then tweak it back to a v2-style ref ref = unit.new_endpoint_ref_with_region(service_id=self.service['id'], region=uuid.uuid4().hex, internalurl=None) del ref['id'] del ref['interface'] ref['publicurl'] = ref.pop('url') # don't set adminurl to ensure it's absence is handled like internalurl # create the endpoint on v2 (using a v3 token) r = self.admin_request( method='POST', path='/v2.0/endpoints', token=self.get_scoped_token(), body={'endpoint': ref}) endpoint_v2 = r.result['endpoint'] # test the endpoint on v3 r = self.get('/endpoints') endpoints = self.assertValidEndpointListResponse(r) self.assertEqual(1, len(endpoints)) endpoint_v3 = endpoints.pop() # these attributes are identical between both APIs self.assertEqual(ref['region'], endpoint_v3['region_id']) 
self.assertEqual(ref['service_id'], endpoint_v3['service_id']) self.assertEqual(ref['description'], endpoint_v3['description']) # a v2 endpoint is not quite the same concept as a v3 endpoint, so they # receive different identifiers self.assertNotEqual(endpoint_v2['id'], endpoint_v3['id']) # v2 has a publicurl; v3 has a url + interface type self.assertEqual(ref['publicurl'], endpoint_v3['url']) self.assertEqual('public', endpoint_v3['interface']) # tests for bug 1152632 -- these attributes were being returned by v3 self.assertNotIn('publicurl', endpoint_v3) self.assertNotIn('adminurl', endpoint_v3) self.assertNotIn('internalurl', endpoint_v3) # test for bug 1152635 -- this attribute was being returned by v3 self.assertNotIn('legacy_endpoint_id', endpoint_v3) self.assertEqual(endpoint_v2['region'], endpoint_v3['region_id']) def test_deleting_endpoint_with_space_in_url(self): # add a space to all urls (intentional "i d" to test bug) url_with_space = "http://127.0.0.1:8774 /v1.1/\$(tenant_i d)s" # create a v3 endpoint ref ref = unit.new_endpoint_ref(service_id=self.service['id'], region_id=None, publicurl=url_with_space, internalurl=url_with_space, adminurl=url_with_space, url=url_with_space) # add the endpoint to the database self.catalog_api.create_endpoint(ref['id'], ref) # delete the endpoint self.delete('/endpoints/%s' % ref['id']) # make sure it's deleted (GET should return Not Found) self.get('/endpoints/%s' % ref['id'], expected_status=http_client.NOT_FOUND) def test_endpoint_create_with_valid_url(self): """Create endpoint with valid url should be tested,too.""" # list one valid url is enough, no need to list too much valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s' ref = unit.new_endpoint_ref(self.service_id, interface='public', region_id=self.region_id, url=valid_url) self.post('/endpoints', body={'endpoint': ref}) def test_endpoint_create_with_valid_url_project_id(self): """Create endpoint with valid url should be tested,too.""" valid_url = 
'http://127.0.0.1:8774/v1.1/$(project_id)s' ref = unit.new_endpoint_ref(self.service_id, interface='public', region_id=self.region_id, url=valid_url) self.post('/endpoints', body={'endpoint': ref}) def test_endpoint_create_with_invalid_url(self): """Test the invalid cases: substitutions is not exactly right.""" invalid_urls = [ # using a substitution that is not whitelisted - KeyError 'http://127.0.0.1:8774/v1.1/$(nonexistent)s', # invalid formatting - ValueError 'http://127.0.0.1:8774/v1.1/$(tenant_id)', 'http://127.0.0.1:8774/v1.1/$(tenant_id)t', 'http://127.0.0.1:8774/v1.1/$(tenant_id', # invalid type specifier - TypeError # admin_url is a string not an int 'http://127.0.0.1:8774/v1.1/$(admin_url)d', ] ref = unit.new_endpoint_ref(self.service_id) for invalid_url in invalid_urls: ref['url'] = invalid_url self.post('/endpoints', body={'endpoint': ref}, expected_status=http_client.BAD_REQUEST) class TestCatalogAPISQL(unit.TestCase): """Tests for the catalog Manager against the SQL backend.""" def setUp(self): super(TestCatalogAPISQL, self).setUp() self.useFixture(database.Database()) self.catalog_api = catalog.Manager() service = unit.new_service_ref() self.service_id = service['id'] self.catalog_api.create_service(self.service_id, service) self.create_endpoint(service_id=self.service_id) def create_endpoint(self, service_id, **kwargs): endpoint = unit.new_endpoint_ref(service_id=service_id, region_id=None, **kwargs) self.catalog_api.create_endpoint(endpoint['id'], endpoint) return endpoint def config_overrides(self): super(TestCatalogAPISQL, self).config_overrides() self.config_fixture.config(group='catalog', driver='sql') def test_get_catalog_ignores_endpoints_with_invalid_urls(self): user_id = uuid.uuid4().hex tenant_id = uuid.uuid4().hex # the only endpoint in the catalog is the one created in setUp catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id) self.assertEqual(1, len(catalog[0]['endpoints'])) # it's also the only endpoint in the backend 
self.assertEqual(1, len(self.catalog_api.list_endpoints())) # create a new, invalid endpoint - malformed type declaration self.create_endpoint(self.service_id, url='http://keystone/%(tenant_id)') # create a new, invalid endpoint - nonexistent key self.create_endpoint(self.service_id, url='http://keystone/%(you_wont_find_me)s') # verify that the invalid endpoints don't appear in the catalog catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id) self.assertEqual(1, len(catalog[0]['endpoints'])) # all three appear in the backend self.assertEqual(3, len(self.catalog_api.list_endpoints())) # create another valid endpoint - tenant_id will be replaced self.create_endpoint(self.service_id, url='http://keystone/%(tenant_id)s') # there are two valid endpoints, positive check catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id) self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2)) # If the URL has no 'tenant_id' to substitute, we will skip the # endpoint which contains this kind of URL, negative check. 
tenant_id = None catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id) self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1)) def test_get_catalog_always_returns_service_name(self): user_id = uuid.uuid4().hex tenant_id = uuid.uuid4().hex # create a service, with a name named_svc = unit.new_service_ref() self.catalog_api.create_service(named_svc['id'], named_svc) self.create_endpoint(service_id=named_svc['id']) # create a service, with no name unnamed_svc = unit.new_service_ref(name=None) del unnamed_svc['name'] self.catalog_api.create_service(unnamed_svc['id'], unnamed_svc) self.create_endpoint(service_id=unnamed_svc['id']) catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id) named_endpoint = [ep for ep in catalog if ep['type'] == named_svc['type']][0] self.assertEqual(named_svc['name'], named_endpoint['name']) unnamed_endpoint = [ep for ep in catalog if ep['type'] == unnamed_svc['type']][0] self.assertEqual('', unnamed_endpoint['name']) # TODO(dstanek): this needs refactoring with the test above, but we are in a # crunch so that will happen in a future patch. 
class TestCatalogAPISQLRegions(unit.TestCase): """Tests for the catalog Manager against the SQL backend.""" def setUp(self): super(TestCatalogAPISQLRegions, self).setUp() self.useFixture(database.Database()) self.catalog_api = catalog.Manager() def config_overrides(self): super(TestCatalogAPISQLRegions, self).config_overrides() self.config_fixture.config(group='catalog', driver='sql') def test_get_catalog_returns_proper_endpoints_with_no_region(self): service = unit.new_service_ref() service_id = service['id'] self.catalog_api.create_service(service_id, service) endpoint = unit.new_endpoint_ref(service_id=service_id, region_id=None) del endpoint['region_id'] self.catalog_api.create_endpoint(endpoint['id'], endpoint) user_id = uuid.uuid4().hex tenant_id = uuid.uuid4().hex catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id) self.assertValidCatalogEndpoint( catalog[0]['endpoints'][0], ref=endpoint) def test_get_catalog_returns_proper_endpoints_with_region(self): service = unit.new_service_ref() service_id = service['id'] self.catalog_api.create_service(service_id, service) endpoint = unit.new_endpoint_ref(service_id=service_id) region = unit.new_region_ref(id=endpoint['region_id']) self.catalog_api.create_region(region) self.catalog_api.create_endpoint(endpoint['id'], endpoint) endpoint = self.catalog_api.get_endpoint(endpoint['id']) user_id = uuid.uuid4().hex tenant_id = uuid.uuid4().hex catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id) self.assertValidCatalogEndpoint( catalog[0]['endpoints'][0], ref=endpoint) def assertValidCatalogEndpoint(self, entity, ref=None): keys = ['description', 'id', 'interface', 'name', 'region_id', 'url'] for k in keys: self.assertEqual(ref.get(k), entity[k], k) self.assertEqual(entity['region_id'], entity['region']) keystone-9.0.0/keystone/tests/unit/test_middleware.py0000664000567000056710000007652412701407102024242 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the 
Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import hashlib import uuid from oslo_config import cfg from six.moves import http_client import webtest from keystone.common import authorization from keystone.common import tokenless_auth from keystone import exception from keystone.federation import constants as federation_constants from keystone import middleware from keystone.tests import unit from keystone.tests.unit import mapping_fixtures from keystone.tests.unit import test_backend_sql CONF = cfg.CONF class MiddlewareRequestTestBase(unit.TestCase): MIDDLEWARE_CLASS = None # override this in subclasses def _application(self): """A base wsgi application that returns a simple response.""" def app(environ, start_response): # WSGI requires the body of the response to be six.binary_type body = uuid.uuid4().hex.encode('utf-8') resp_headers = [('Content-Type', 'text/html; charset=utf8'), ('Content-Length', str(len(body)))] start_response('200 OK', resp_headers) return [body] return app def _generate_app_response(self, app, headers=None, method='get', path='/', **kwargs): """Given a wsgi application wrap it in webtest and call it.""" return getattr(webtest.TestApp(app), method)(path, headers=headers or {}, **kwargs) def _middleware_failure(self, exc, *args, **kwargs): """Assert that an exception is being thrown from process_request.""" # NOTE(jamielennox): This is a little ugly. 
We need to call the webtest # framework so that the correct RequestClass object is created for when # we call process_request. However because we go via webtest we only # see the response object and not the actual exception that is thrown # by process_request. To get around this we subclass process_request # with something that checks for the right type of exception being # thrown so we can test the middle of the request process. # TODO(jamielennox): Change these tests to test the value of the # response rather than the error that is raised. class _Failing(self.MIDDLEWARE_CLASS): _called = False def process_request(i_self, *i_args, **i_kwargs): # i_ to distinguish it from and not clobber the outer vars e = self.assertRaises(exc, super(_Failing, i_self).process_request, *i_args, **i_kwargs) i_self._called = True raise e # by default the returned status when an uncaught exception is raised # for validation or caught errors this will likely be 400 kwargs.setdefault('status', http_client.INTERNAL_SERVER_ERROR) # 500 app = _Failing(self._application()) resp = self._generate_app_response(app, *args, **kwargs) self.assertTrue(app._called) return resp def _do_middleware_response(self, *args, **kwargs): """Wrap a middleware around a sample application and call it.""" app = self.MIDDLEWARE_CLASS(self._application()) return self._generate_app_response(app, *args, **kwargs) def _do_middleware_request(self, *args, **kwargs): """The request object from a successful middleware call.""" return self._do_middleware_response(*args, **kwargs).request class TokenAuthMiddlewareTest(MiddlewareRequestTestBase): MIDDLEWARE_CLASS = middleware.TokenAuthMiddleware def test_request(self): headers = {middleware.AUTH_TOKEN_HEADER: 'MAGIC'} req = self._do_middleware_request(headers=headers) context = req.environ[middleware.CONTEXT_ENV] self.assertEqual('MAGIC', context['token_id']) class AdminTokenAuthMiddlewareTest(MiddlewareRequestTestBase): MIDDLEWARE_CLASS = 
middleware.AdminTokenAuthMiddleware def config_overrides(self): super(AdminTokenAuthMiddlewareTest, self).config_overrides() self.config_fixture.config( admin_token='ADMIN') def test_request_admin(self): headers = {middleware.AUTH_TOKEN_HEADER: 'ADMIN'} req = self._do_middleware_request(headers=headers) self.assertTrue(req.environ[middleware.CONTEXT_ENV]['is_admin']) def test_request_non_admin(self): headers = {middleware.AUTH_TOKEN_HEADER: 'NOT-ADMIN'} req = self._do_middleware_request(headers=headers) self.assertFalse(req.environ[middleware.CONTEXT_ENV]['is_admin']) class JsonBodyMiddlewareTest(MiddlewareRequestTestBase): MIDDLEWARE_CLASS = middleware.JsonBodyMiddleware def test_request_with_params(self): headers = {'Content-Type': 'application/json'} params = '{"arg1": "one", "arg2": ["a"]}' req = self._do_middleware_request(params=params, headers=headers, method='post') self.assertEqual({"arg1": "one", "arg2": ["a"]}, req.environ[middleware.PARAMS_ENV]) def test_malformed_json(self): headers = {'Content-Type': 'application/json'} self._do_middleware_response(params='{"arg1": "on', headers=headers, method='post', status=http_client.BAD_REQUEST) def test_not_dict_body(self): headers = {'Content-Type': 'application/json'} resp = self._do_middleware_response(params='42', headers=headers, method='post', status=http_client.BAD_REQUEST) self.assertIn('valid JSON object', resp.json['error']['message']) def test_no_content_type(self): headers = {'Content-Type': ''} params = '{"arg1": "one", "arg2": ["a"]}' req = self._do_middleware_request(params=params, headers=headers, method='post') self.assertEqual({"arg1": "one", "arg2": ["a"]}, req.environ[middleware.PARAMS_ENV]) def test_unrecognized_content_type(self): headers = {'Content-Type': 'text/plain'} self._do_middleware_response(params='{"arg1": "one", "arg2": ["a"]}', headers=headers, method='post', status=http_client.BAD_REQUEST) def test_unrecognized_content_type_without_body(self): headers = {'Content-Type': 
'text/plain'} req = self._do_middleware_request(headers=headers) self.assertEqual({}, req.environ.get(middleware.PARAMS_ENV, {})) class AuthContextMiddlewareTest(test_backend_sql.SqlTests, MiddlewareRequestTestBase): MIDDLEWARE_CLASS = middleware.AuthContextMiddleware def setUp(self): super(AuthContextMiddlewareTest, self).setUp() self.client_issuer = uuid.uuid4().hex self.untrusted_client_issuer = uuid.uuid4().hex self.trusted_issuer = self.client_issuer self.config_fixture.config(group='tokenless_auth', trusted_issuer=[self.trusted_issuer]) # client_issuer is encoded because you can't hash # unicode objects with hashlib. # This idp_id is calculated based on sha256(self.client_issuer) hashed_idp = hashlib.sha256(self.client_issuer.encode('utf-8')) self.idp_id = hashed_idp.hexdigest() self._load_sample_data() def _load_sample_data(self): self.protocol_id = 'x509' # 1) Create a domain for the user. self.domain = unit.new_domain_ref() self.domain_id = self.domain['id'] self.domain_name = self.domain['name'] self.resource_api.create_domain(self.domain_id, self.domain) # 2) Create a project for the user. self.project = unit.new_project_ref(domain_id=self.domain_id) self.project_id = self.project['id'] self.project_name = self.project['name'] self.resource_api.create_project(self.project_id, self.project) # 3) Create a user in new domain. 
self.user = unit.new_user_ref(domain_id=self.domain_id, project_id=self.project_id) self.user = self.identity_api.create_user(self.user) # Add IDP self.idp = self._idp_ref(id=self.idp_id) self.federation_api.create_idp(self.idp['id'], self.idp) # Add a role self.role = unit.new_role_ref() self.role_id = self.role['id'] self.role_name = self.role['name'] self.role_api.create_role(self.role_id, self.role) # Add a group self.group = unit.new_group_ref(domain_id=self.domain_id) self.group = self.identity_api.create_group(self.group) # Assign a role to the user on a project self.assignment_api.add_role_to_user_and_project( user_id=self.user['id'], tenant_id=self.project_id, role_id=self.role_id) # Assign a role to the group on a project self.assignment_api.create_grant( role_id=self.role_id, group_id=self.group['id'], project_id=self.project_id) def _load_mapping_rules(self, rules): # Add a mapping self.mapping = self._mapping_ref(rules=rules) self.federation_api.create_mapping(self.mapping['id'], self.mapping) # Add protocols self.proto_x509 = self._proto_ref(mapping_id=self.mapping['id']) self.proto_x509['id'] = self.protocol_id self.federation_api.create_protocol(self.idp['id'], self.proto_x509['id'], self.proto_x509) def _idp_ref(self, id=None): idp = { 'id': id or uuid.uuid4().hex, 'enabled': True, 'description': uuid.uuid4().hex } return idp def _proto_ref(self, mapping_id=None): proto = { 'id': uuid.uuid4().hex, 'mapping_id': mapping_id or uuid.uuid4().hex } return proto def _mapping_ref(self, rules=None): if rules is None: mapped_rules = {} else: mapped_rules = rules.get('rules', {}) return { 'id': uuid.uuid4().hex, 'rules': mapped_rules } def _assert_tokenless_auth_context(self, context, ephemeral_user=False): self.assertIsNotNone(context) self.assertEqual(self.project_id, context['project_id']) self.assertIn(self.role_name, context['roles']) if ephemeral_user: self.assertEqual(self.group['id'], context['group_ids'][0]) self.assertEqual('ephemeral', 
context[federation_constants.PROTOCOL]) self.assertEqual(self.idp_id, context[federation_constants.IDENTITY_PROVIDER]) else: self.assertEqual(self.user['id'], context['user_id']) def _create_context(self, request, mapping_ref=None, exception_expected=False): """Builds the auth context from the given arguments. auth context will be returned from the AuthContextMiddleware based on what is being passed in the given request and what mapping is being setup in the backend DB. :param request: HTTP request :param mapping_ref: A mapping in JSON structure will be setup in the backend DB for mapping a user or a group. :param exception_expected: Sets to True when an exception is expected to raised based on the given arguments. :returns: context an auth context contains user and role information :rtype: dict """ if mapping_ref: self._load_mapping_rules(mapping_ref) if not exception_expected: (middleware.AuthContextMiddleware('Tokenless_auth_test'). process_request(request)) context = request.environ.get(authorization.AUTH_CONTEXT_ENV) else: context = middleware.AuthContextMiddleware('Tokenless_auth_test') return context def test_context_already_exists(self): stub_value = uuid.uuid4().hex env = {authorization.AUTH_CONTEXT_ENV: stub_value} req = self._do_middleware_request(extra_environ=env) self.assertEqual(stub_value, req.environ.get(authorization.AUTH_CONTEXT_ENV)) def test_not_applicable_to_token_request(self): req = self._do_middleware_request(path='/auth/tokens', method='post') context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertIsNone(context) def test_no_tokenless_attributes_request(self): req = self._do_middleware_request() context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertIsNone(context) def test_no_issuer_attribute_request(self): env = {} env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertIsNone(context) def 
test_has_only_issuer_and_project_name_request(self): env = {} # SSL_CLIENT_I_DN is the attribute name that wsgi env # references to issuer of the client certificate. env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = uuid.uuid4().hex self._middleware_failure(exception.ValidationError, extra_environ=env, status=400) def test_has_only_issuer_and_project_domain_name_request(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_DOMAIN_NAME'] = uuid.uuid4().hex self._middleware_failure(exception.ValidationError, extra_environ=env, status=400) def test_has_only_issuer_and_project_domain_id_request(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_DOMAIN_ID'] = uuid.uuid4().hex self._middleware_failure(exception.ValidationError, extra_environ=env, status=400) def test_missing_both_domain_and_project_request(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer self._middleware_failure(exception.ValidationError, extra_environ=env, status=400) def test_empty_trusted_issuer_list(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex self.config_fixture.config(group='tokenless_auth', trusted_issuer=[]) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertIsNone(context) def test_client_issuer_not_trusted(self): env = {} env['SSL_CLIENT_I_DN'] = self.untrusted_client_issuer env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertIsNone(context) def test_proj_scope_with_proj_id_and_proj_dom_id_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id # SSL_CLIENT_USER_NAME and SSL_CLIENT_DOMAIN_NAME are the types # defined in the mapping that will map to the user name and 
# domain name env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) def test_proj_scope_with_proj_id_only_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) def test_proj_scope_with_proj_name_and_proj_dom_id_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) def test_proj_scope_with_proj_name_and_proj_dom_name_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) def test_proj_scope_with_proj_name_only_fail(self): env = {} 
env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_id env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME) self._middleware_failure(exception.ValidationError, extra_environ=env, status=400) def test_mapping_with_userid_and_domainid_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_ID'] = self.user['id'] env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERID_AND_DOMAINID) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) def test_mapping_with_userid_and_domainname_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_ID'] = self.user['id'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERID_AND_DOMAINNAME) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) def test_mapping_with_username_and_domainid_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) def test_only_domain_name_fail(self): 
env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_DOMAINNAME_ONLY) self._middleware_failure(exception.ValidationError, extra_environ=env, status=400) def test_only_domain_id_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_DOMAINID_ONLY) self._middleware_failure(exception.ValidationError, extra_environ=env, status=400) def test_missing_domain_data_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_USER_NAME'] = self.user['name'] self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_ONLY) self._middleware_failure(exception.ValidationError, extra_environ=env, status=400) def test_userid_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_USER_ID'] = self.user['id'] self._load_mapping_rules(mapping_fixtures.MAPPING_WITH_USERID_ONLY) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) def test_domain_disable_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id self.domain['enabled'] = False self.domain = self.resource_api.update_domain( self.domain['id'], self.domain) self._load_mapping_rules( 
mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID) self._middleware_failure(exception.Unauthorized, extra_environ=env, status=401) def test_user_disable_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id self.user['enabled'] = False self.user = self.identity_api.update_user(self.user['id'], self.user) self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID) self._middleware_failure(AssertionError, extra_environ=env) def test_invalid_user_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_USER_NAME'] = uuid.uuid4().hex env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME) self._middleware_failure(exception.UserNotFound, extra_environ=env, status=404) def test_ephemeral_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] self.config_fixture.config(group='tokenless_auth', protocol='ephemeral') self.protocol_id = 'ephemeral' mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER) mapping['rules'][0]['local'][0]['group']['id'] = self.group['id'] self._load_mapping_rules(mapping) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context, ephemeral_user=True) def test_ephemeral_with_default_user_type_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name 
env['SSL_CLIENT_USER_NAME'] = self.user['name'] self.config_fixture.config(group='tokenless_auth', protocol='ephemeral') self.protocol_id = 'ephemeral' # this mapping does not have the user type defined # and it should defaults to 'ephemeral' which is # the expected type for the test case. mapping = copy.deepcopy( mapping_fixtures.MAPPING_FOR_DEFAULT_EPHEMERAL_USER) mapping['rules'][0]['local'][0]['group']['id'] = self.group['id'] self._load_mapping_rules(mapping) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context, ephemeral_user=True) def test_ephemeral_any_user_success(self): """Verify ephemeral user does not need a specified user. Keystone is not looking to match the user, but a corresponding group. """ env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = uuid.uuid4().hex self.config_fixture.config(group='tokenless_auth', protocol='ephemeral') self.protocol_id = 'ephemeral' mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER) mapping['rules'][0]['local'][0]['group']['id'] = self.group['id'] self._load_mapping_rules(mapping) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context, ephemeral_user=True) def test_ephemeral_invalid_scope_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = uuid.uuid4().hex env['HTTP_X_PROJECT_DOMAIN_NAME'] = uuid.uuid4().hex env['SSL_CLIENT_USER_NAME'] = self.user['name'] self.config_fixture.config(group='tokenless_auth', protocol='ephemeral') self.protocol_id = 'ephemeral' mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER) mapping['rules'][0]['local'][0]['group']['id'] = self.group['id'] self._load_mapping_rules(mapping) 
self._middleware_failure(exception.Unauthorized, extra_environ=env, status=401) def test_ephemeral_no_group_found_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] self.config_fixture.config(group='tokenless_auth', protocol='ephemeral') self.protocol_id = 'ephemeral' mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER) mapping['rules'][0]['local'][0]['group']['id'] = uuid.uuid4().hex self._load_mapping_rules(mapping) self._middleware_failure(exception.MappedGroupNotFound, extra_environ=env) def test_ephemeral_incorrect_mapping_fail(self): """Test ephemeral user picking up the non-ephemeral user mapping. Looking up the mapping with protocol Id 'x509' will load up the non-ephemeral user mapping, results unauthenticated. """ env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] # This will pick up the incorrect mapping self.config_fixture.config(group='tokenless_auth', protocol='x509') self.protocol_id = 'x509' mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER) mapping['rules'][0]['local'][0]['group']['id'] = uuid.uuid4().hex self._load_mapping_rules(mapping) self._middleware_failure(exception.MappedGroupNotFound, extra_environ=env) def test_create_idp_id_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer auth = tokenless_auth.TokenlessAuthHelper(env) idp_id = auth._build_idp_id() self.assertEqual(self.idp_id, idp_id) def test_create_idp_id_attri_not_found_fail(self): env = {} env[uuid.uuid4().hex] = self.client_issuer auth = tokenless_auth.TokenlessAuthHelper(env) expected_msg = ('Could not determine Identity Provider ID. The ' 'configuration option %s was not found in the ' 'request environment.' 
% CONF.tokenless_auth.issuer_attribute) # Check the content of the exception message as well self.assertRaisesRegexp(exception.TokenlessAuthConfigError, expected_msg, auth._build_idp_id) keystone-9.0.0/keystone/tests/unit/test_revoke.py0000664000567000056710000005674412701407102023422 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid import mock from oslo_utils import timeutils from six.moves import range from testtools import matchers from keystone.common import utils from keystone import exception from keystone.models import revoke_model from keystone.tests import unit from keystone.tests.unit import test_backend_sql from keystone.token import provider def _new_id(): return uuid.uuid4().hex def _future_time(): expire_delta = datetime.timedelta(seconds=1000) future_time = timeutils.utcnow() + expire_delta return future_time def _past_time(): expire_delta = datetime.timedelta(days=-1000) past_time = timeutils.utcnow() + expire_delta return past_time def _sample_blank_token(): issued_delta = datetime.timedelta(minutes=-2) issued_at = timeutils.utcnow() + issued_delta token_data = revoke_model.blank_token_data(issued_at) return token_data def _matches(event, token_values): """See if the token matches the revocation event. Used as a secondary check on the logic to Check By Tree Below: This is abrute force approach to checking. Compare each attribute from the event with the corresponding value from the token. 
If the event does not have a value for the attribute, a match is still possible. If the event has a value for the attribute, and it does not match the token, no match is possible, so skip the remaining checks. :param event: one revocation event to match :param token_values: dictionary with set of values taken from the token :returns: True if the token matches the revocation event, indicating the token has been revoked """ # The token has three attributes that can match the user_id if event.user_id is not None: for attribute_name in ['user_id', 'trustor_id', 'trustee_id']: if event.user_id == token_values[attribute_name]: break else: return False # The token has two attributes that can match the domain_id if event.domain_id is not None: for attribute_name in ['identity_domain_id', 'assignment_domain_id']: if event.domain_id == token_values[attribute_name]: break else: return False if event.domain_scope_id is not None: if event.domain_scope_id != token_values['assignment_domain_id']: return False # If any one check does not match, the while token does # not match the event. The numerous return False indicate # that the token is still valid and short-circuits the # rest of the logic. 
attribute_names = ['project_id', 'expires_at', 'trust_id', 'consumer_id', 'access_token_id', 'audit_id', 'audit_chain_id'] for attribute_name in attribute_names: if getattr(event, attribute_name) is not None: if (getattr(event, attribute_name) != token_values[attribute_name]): return False if event.role_id is not None: roles = token_values['roles'] for role in roles: if event.role_id == role: break else: return False if token_values['issued_at'] > event.issued_before: return False return True class RevokeTests(object): def test_list(self): self.revoke_api.revoke_by_user(user_id=1) self.assertEqual(1, len(self.revoke_api.list_events())) self.revoke_api.revoke_by_user(user_id=2) self.assertEqual(2, len(self.revoke_api.list_events())) def test_list_since(self): self.revoke_api.revoke_by_user(user_id=1) self.revoke_api.revoke_by_user(user_id=2) past = timeutils.utcnow() - datetime.timedelta(seconds=1000) self.assertEqual(2, len(self.revoke_api.list_events(last_fetch=past))) future = timeutils.utcnow() + datetime.timedelta(seconds=1000) self.assertEqual(0, len(self.revoke_api.list_events(last_fetch=future))) def test_past_expiry_are_removed(self): user_id = 1 self.revoke_api.revoke_by_expiration(user_id, _future_time()) self.assertEqual(1, len(self.revoke_api.list_events())) event = revoke_model.RevokeEvent() event.revoked_at = _past_time() self.revoke_api.revoke(event) self.assertEqual(1, len(self.revoke_api.list_events())) @mock.patch.object(timeutils, 'utcnow') def test_expired_events_removed_validate_token_success(self, mock_utcnow): def _sample_token_values(): token = _sample_blank_token() token['expires_at'] = utils.isotime(_future_time(), subsecond=True) return token now = datetime.datetime.utcnow() now_plus_2h = now + datetime.timedelta(hours=2) mock_utcnow.return_value = now # Build a token and validate it. This will seed the cache for the # future 'synchronize' call. 
token_values = _sample_token_values() user_id = _new_id() self.revoke_api.revoke_by_user(user_id) token_values['user_id'] = user_id self.assertRaises(exception.TokenNotFound, self.revoke_api.check_token, token_values) # Move our clock forward by 2h, build a new token and validate it. # 'synchronize' should now be exercised and remove old expired events mock_utcnow.return_value = now_plus_2h self.revoke_api.revoke_by_expiration(_new_id(), now_plus_2h) # should no longer throw an exception self.revoke_api.check_token(token_values) def test_revoke_by_expiration_project_and_domain_fails(self): user_id = _new_id() expires_at = utils.isotime(_future_time(), subsecond=True) domain_id = _new_id() project_id = _new_id() self.assertThat( lambda: self.revoke_api.revoke_by_expiration( user_id, expires_at, domain_id=domain_id, project_id=project_id), matchers.raises(exception.UnexpectedError)) class SqlRevokeTests(test_backend_sql.SqlTests, RevokeTests): def config_overrides(self): super(SqlRevokeTests, self).config_overrides() self.config_fixture.config( group='token', provider='pki', revoke_by_id=False) class RevokeTreeTests(unit.TestCase): def setUp(self): super(RevokeTreeTests, self).setUp() self.events = [] self.tree = revoke_model.RevokeTree() self._sample_data() def _sample_data(self): user_ids = [] project_ids = [] role_ids = [] for i in range(0, 3): user_ids.append(_new_id()) project_ids.append(_new_id()) role_ids.append(_new_id()) project_tokens = [] i = len(project_tokens) project_tokens.append(_sample_blank_token()) project_tokens[i]['user_id'] = user_ids[0] project_tokens[i]['project_id'] = project_ids[0] project_tokens[i]['roles'] = [role_ids[1]] i = len(project_tokens) project_tokens.append(_sample_blank_token()) project_tokens[i]['user_id'] = user_ids[1] project_tokens[i]['project_id'] = project_ids[0] project_tokens[i]['roles'] = [role_ids[0]] i = len(project_tokens) project_tokens.append(_sample_blank_token()) project_tokens[i]['user_id'] = user_ids[0] 
project_tokens[i]['project_id'] = project_ids[1] project_tokens[i]['roles'] = [role_ids[0]] token_to_revoke = _sample_blank_token() token_to_revoke['user_id'] = user_ids[0] token_to_revoke['project_id'] = project_ids[0] token_to_revoke['roles'] = [role_ids[0]] self.project_tokens = project_tokens self.user_ids = user_ids self.project_ids = project_ids self.role_ids = role_ids self.token_to_revoke = token_to_revoke def _assertTokenRevoked(self, token_data): self.assertTrue(any([_matches(e, token_data) for e in self.events])) return self.assertTrue(self.tree.is_revoked(token_data), 'Token should be revoked') def _assertTokenNotRevoked(self, token_data): self.assertFalse(any([_matches(e, token_data) for e in self.events])) return self.assertFalse(self.tree.is_revoked(token_data), 'Token should not be revoked') def _revoke_by_user(self, user_id): return self.tree.add_event( revoke_model.RevokeEvent(user_id=user_id)) def _revoke_by_audit_id(self, audit_id): event = self.tree.add_event( revoke_model.RevokeEvent(audit_id=audit_id)) self.events.append(event) return event def _revoke_by_audit_chain_id(self, audit_chain_id, project_id=None, domain_id=None): event = self.tree.add_event( revoke_model.RevokeEvent(audit_chain_id=audit_chain_id, project_id=project_id, domain_id=domain_id) ) self.events.append(event) return event def _revoke_by_expiration(self, user_id, expires_at, project_id=None, domain_id=None): event = self.tree.add_event( revoke_model.RevokeEvent(user_id=user_id, expires_at=expires_at, project_id=project_id, domain_id=domain_id)) self.events.append(event) return event def _revoke_by_grant(self, role_id, user_id=None, domain_id=None, project_id=None): event = self.tree.add_event( revoke_model.RevokeEvent(user_id=user_id, role_id=role_id, domain_id=domain_id, project_id=project_id)) self.events.append(event) return event def _revoke_by_user_and_project(self, user_id, project_id): event = self.tree.add_event( revoke_model.RevokeEvent(project_id=project_id, 
user_id=user_id)) self.events.append(event) return event def _revoke_by_project_role_assignment(self, project_id, role_id): event = self.tree.add_event( revoke_model.RevokeEvent(project_id=project_id, role_id=role_id)) self.events.append(event) return event def _revoke_by_domain_role_assignment(self, domain_id, role_id): event = self.tree.add_event( revoke_model.RevokeEvent(domain_id=domain_id, role_id=role_id)) self.events.append(event) return event def _revoke_by_domain(self, domain_id): event = self.tree.add_event( revoke_model.RevokeEvent(domain_id=domain_id)) self.events.append(event) def _user_field_test(self, field_name): user_id = _new_id() event = self._revoke_by_user(user_id) self.events.append(event) token_data_u1 = _sample_blank_token() token_data_u1[field_name] = user_id self._assertTokenRevoked(token_data_u1) token_data_u2 = _sample_blank_token() token_data_u2[field_name] = _new_id() self._assertTokenNotRevoked(token_data_u2) self.tree.remove_event(event) self.events.remove(event) self._assertTokenNotRevoked(token_data_u1) def test_revoke_by_user(self): self._user_field_test('user_id') def test_revoke_by_user_matches_trustee(self): self._user_field_test('trustee_id') def test_revoke_by_user_matches_trustor(self): self._user_field_test('trustor_id') def test_by_user_expiration(self): future_time = _future_time() user_id = 1 event = self._revoke_by_expiration(user_id, future_time) token_data_1 = _sample_blank_token() token_data_1['user_id'] = user_id token_data_1['expires_at'] = future_time.replace(microsecond=0) self._assertTokenRevoked(token_data_1) token_data_2 = _sample_blank_token() token_data_2['user_id'] = user_id expire_delta = datetime.timedelta(seconds=2000) future_time = timeutils.utcnow() + expire_delta token_data_2['expires_at'] = future_time self._assertTokenNotRevoked(token_data_2) self.remove_event(event) self._assertTokenNotRevoked(token_data_1) def test_revoke_by_audit_id(self): audit_id = provider.audit_info(parent_audit_id=None)[0] 
token_data_1 = _sample_blank_token() # Audit ID and Audit Chain ID are populated with the same value # if the token is an original token token_data_1['audit_id'] = audit_id token_data_1['audit_chain_id'] = audit_id event = self._revoke_by_audit_id(audit_id) self._assertTokenRevoked(token_data_1) audit_id_2 = provider.audit_info(parent_audit_id=audit_id)[0] token_data_2 = _sample_blank_token() token_data_2['audit_id'] = audit_id_2 token_data_2['audit_chain_id'] = audit_id self._assertTokenNotRevoked(token_data_2) self.remove_event(event) self._assertTokenNotRevoked(token_data_1) def test_revoke_by_audit_chain_id(self): audit_id = provider.audit_info(parent_audit_id=None)[0] token_data_1 = _sample_blank_token() # Audit ID and Audit Chain ID are populated with the same value # if the token is an original token token_data_1['audit_id'] = audit_id token_data_1['audit_chain_id'] = audit_id event = self._revoke_by_audit_chain_id(audit_id) self._assertTokenRevoked(token_data_1) audit_id_2 = provider.audit_info(parent_audit_id=audit_id)[0] token_data_2 = _sample_blank_token() token_data_2['audit_id'] = audit_id_2 token_data_2['audit_chain_id'] = audit_id self._assertTokenRevoked(token_data_2) self.remove_event(event) self._assertTokenNotRevoked(token_data_1) self._assertTokenNotRevoked(token_data_2) def test_by_user_project(self): # When a user has a project-scoped token and the project-scoped token # is revoked then the token is revoked. user_id = _new_id() project_id = _new_id() future_time = _future_time() token_data = _sample_blank_token() token_data['user_id'] = user_id token_data['project_id'] = project_id token_data['expires_at'] = future_time.replace(microsecond=0) self._revoke_by_expiration(user_id, future_time, project_id=project_id) self._assertTokenRevoked(token_data) def test_by_user_domain(self): # When a user has a domain-scoped token and the domain-scoped token # is revoked then the token is revoked. 
user_id = _new_id() domain_id = _new_id() future_time = _future_time() token_data = _sample_blank_token() token_data['user_id'] = user_id token_data['assignment_domain_id'] = domain_id token_data['expires_at'] = future_time.replace(microsecond=0) self._revoke_by_expiration(user_id, future_time, domain_id=domain_id) self._assertTokenRevoked(token_data) def remove_event(self, event): self.events.remove(event) self.tree.remove_event(event) def test_by_project_grant(self): token_to_revoke = self.token_to_revoke tokens = self.project_tokens self._assertTokenNotRevoked(token_to_revoke) for token in tokens: self._assertTokenNotRevoked(token) event = self._revoke_by_grant(role_id=self.role_ids[0], user_id=self.user_ids[0], project_id=self.project_ids[0]) self._assertTokenRevoked(token_to_revoke) for token in tokens: self._assertTokenNotRevoked(token) self.remove_event(event) self._assertTokenNotRevoked(token_to_revoke) for token in tokens: self._assertTokenNotRevoked(token) token_to_revoke['roles'] = [self.role_ids[0], self.role_ids[1], self.role_ids[2]] event = self._revoke_by_grant(role_id=self.role_ids[0], user_id=self.user_ids[0], project_id=self.project_ids[0]) self._assertTokenRevoked(token_to_revoke) self.remove_event(event) self._assertTokenNotRevoked(token_to_revoke) event = self._revoke_by_grant(role_id=self.role_ids[1], user_id=self.user_ids[0], project_id=self.project_ids[0]) self._assertTokenRevoked(token_to_revoke) self.remove_event(event) self._assertTokenNotRevoked(token_to_revoke) self._revoke_by_grant(role_id=self.role_ids[0], user_id=self.user_ids[0], project_id=self.project_ids[0]) self._revoke_by_grant(role_id=self.role_ids[1], user_id=self.user_ids[0], project_id=self.project_ids[0]) self._revoke_by_grant(role_id=self.role_ids[2], user_id=self.user_ids[0], project_id=self.project_ids[0]) self._assertTokenRevoked(token_to_revoke) def test_by_project_and_user_and_role(self): user_id1 = _new_id() user_id2 = _new_id() project_id = _new_id() 
self.events.append(self._revoke_by_user(user_id1)) self.events.append( self._revoke_by_user_and_project(user_id2, project_id)) token_data = _sample_blank_token() token_data['user_id'] = user_id2 token_data['project_id'] = project_id self._assertTokenRevoked(token_data) def test_by_domain_user(self): # If revoke a domain, then a token for a user in the domain is revoked user_id = _new_id() domain_id = _new_id() token_data = _sample_blank_token() token_data['user_id'] = user_id token_data['identity_domain_id'] = domain_id self._revoke_by_domain(domain_id) self._assertTokenRevoked(token_data) def test_by_domain_project(self): # If revoke a domain, then a token scoped to a project in the domain # is revoked. user_id = _new_id() user_domain_id = _new_id() project_id = _new_id() project_domain_id = _new_id() token_data = _sample_blank_token() token_data['user_id'] = user_id token_data['identity_domain_id'] = user_domain_id token_data['project_id'] = project_id token_data['assignment_domain_id'] = project_domain_id self._revoke_by_domain(project_domain_id) self._assertTokenRevoked(token_data) def test_by_domain_domain(self): # If revoke a domain, then a token scoped to the domain is revoked. 
user_id = _new_id() user_domain_id = _new_id() domain_id = _new_id() token_data = _sample_blank_token() token_data['user_id'] = user_id token_data['identity_domain_id'] = user_domain_id token_data['assignment_domain_id'] = domain_id self._revoke_by_domain(domain_id) self._assertTokenRevoked(token_data) def _assertEmpty(self, collection): return self.assertEqual(0, len(collection), "collection not empty") def _assertEventsMatchIteration(self, turn): self.assertEqual(1, len(self.tree.revoke_map)) self.assertEqual(turn + 1, len(self.tree.revoke_map ['trust_id=*'] ['consumer_id=*'] ['access_token_id=*'] ['audit_id=*'] ['audit_chain_id=*'])) # two different functions add domain_ids, +1 for None self.assertEqual(2 * turn + 1, len(self.tree.revoke_map ['trust_id=*'] ['consumer_id=*'] ['access_token_id=*'] ['audit_id=*'] ['audit_chain_id=*'] ['expires_at=*'])) # two different functions add project_ids, +1 for None self.assertEqual(2 * turn + 1, len(self.tree.revoke_map ['trust_id=*'] ['consumer_id=*'] ['access_token_id=*'] ['audit_id=*'] ['audit_chain_id=*'] ['expires_at=*'] ['domain_id=*'])) # 10 users added self.assertEqual(turn, len(self.tree.revoke_map ['trust_id=*'] ['consumer_id=*'] ['access_token_id=*'] ['audit_id=*'] ['audit_chain_id=*'] ['expires_at=*'] ['domain_id=*'] ['project_id=*'])) def test_cleanup(self): events = self.events self._assertEmpty(self.tree.revoke_map) expiry_base_time = _future_time() for i in range(0, 10): events.append( self._revoke_by_user(_new_id())) args = (_new_id(), expiry_base_time + datetime.timedelta(seconds=i)) events.append( self._revoke_by_expiration(*args)) self.assertEqual(i + 2, len(self.tree.revoke_map ['trust_id=*'] ['consumer_id=*'] ['access_token_id=*'] ['audit_id=*'] ['audit_chain_id=*']), 'adding %s to %s' % (args, self.tree.revoke_map)) events.append( self._revoke_by_project_role_assignment(_new_id(), _new_id())) events.append( self._revoke_by_domain_role_assignment(_new_id(), _new_id())) events.append( 
self._revoke_by_domain_role_assignment(_new_id(), _new_id())) events.append( self._revoke_by_user_and_project(_new_id(), _new_id())) self._assertEventsMatchIteration(i + 1) for event in self.events: self.tree.remove_event(event) self._assertEmpty(self.tree.revoke_map) keystone-9.0.0/keystone/tests/unit/test_v3_resource.py0000664000567000056710000016315712701407105024366 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import cfg from six.moves import http_client from six.moves import range from testtools import matchers from keystone.common import controller from keystone import exception from keystone.tests import unit from keystone.tests.unit import test_v3 from keystone.tests.unit import utils as test_utils CONF = cfg.CONF class ResourceTestCase(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin): """Test domains and projects.""" # Domain CRUD tests def test_create_domain(self): """Call ``POST /domains``.""" ref = unit.new_domain_ref() r = self.post( '/domains', body={'domain': ref}) return self.assertValidDomainResponse(r, ref) def test_create_domain_case_sensitivity(self): """Call `POST /domains`` twice with upper() and lower() cased name.""" ref = unit.new_domain_ref() # ensure the name is lowercase ref['name'] = ref['name'].lower() r = self.post( '/domains', body={'domain': ref}) self.assertValidDomainResponse(r, ref) # ensure the name is uppercase ref['name'] = ref['name'].upper() r = self.post( '/domains', 
body={'domain': ref}) self.assertValidDomainResponse(r, ref) def test_create_domain_bad_request(self): """Call ``POST /domains``.""" self.post('/domains', body={'domain': {}}, expected_status=http_client.BAD_REQUEST) def test_create_domain_unsafe(self): """Call ``POST /domains with unsafe names``.""" unsafe_name = 'i am not / safe' self.config_fixture.config(group='resource', domain_name_url_safe='off') ref = unit.new_domain_ref(name=unsafe_name) self.post( '/domains', body={'domain': ref}) for config_setting in ['new', 'strict']: self.config_fixture.config(group='resource', domain_name_url_safe=config_setting) ref = unit.new_domain_ref(name=unsafe_name) self.post( '/domains', body={'domain': ref}, expected_status=http_client.BAD_REQUEST) def test_create_domain_unsafe_default(self): """Check default for unsafe names for ``POST /domains``.""" unsafe_name = 'i am not / safe' # By default, we should be able to create unsafe names ref = unit.new_domain_ref(name=unsafe_name) self.post( '/domains', body={'domain': ref}) def test_create_domain_creates_is_domain_project(self): """Check a project that acts as a domain is created. Call ``POST /domains``. 
""" # Create a new domain domain_ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': domain_ref}) self.assertValidDomainResponse(r, domain_ref) # Retrieve its correspondent project r = self.get('/projects/%(project_id)s' % { 'project_id': r.result['domain']['id']}) self.assertValidProjectResponse(r) # The created project has is_domain flag as True self.assertTrue(r.result['project']['is_domain']) # And its parent_id and domain_id attributes are equal self.assertIsNone(r.result['project']['parent_id']) self.assertIsNone(r.result['project']['domain_id']) def test_create_is_domain_project_creates_domain(self): """Call ``POST /projects`` is_domain and check a domain is created.""" # Create a new project that acts as a domain project_ref = unit.new_project_ref(domain_id=None, is_domain=True) r = self.post('/projects', body={'project': project_ref}) self.assertValidProjectResponse(r) # Retrieve its correspondent domain r = self.get('/domains/%(domain_id)s' % { 'domain_id': r.result['project']['id']}) self.assertValidDomainResponse(r) self.assertIsNotNone(r.result['domain']) def test_list_domains(self): """Call ``GET /domains``.""" resource_url = '/domains' r = self.get(resource_url) self.assertValidDomainListResponse(r, ref=self.domain, resource_url=resource_url) def test_get_domain(self): """Call ``GET /domains/{domain_id}``.""" r = self.get('/domains/%(domain_id)s' % { 'domain_id': self.domain_id}) self.assertValidDomainResponse(r, self.domain) def test_update_domain(self): """Call ``PATCH /domains/{domain_id}``.""" ref = unit.new_domain_ref() del ref['id'] r = self.patch('/domains/%(domain_id)s' % { 'domain_id': self.domain_id}, body={'domain': ref}) self.assertValidDomainResponse(r, ref) def test_update_domain_unsafe(self): """Call ``POST /domains/{domain_id} with unsafe names``.""" unsafe_name = 'i am not / safe' self.config_fixture.config(group='resource', domain_name_url_safe='off') ref = unit.new_domain_ref(name=unsafe_name) del ref['id'] 
self.patch('/domains/%(domain_id)s' % { 'domain_id': self.domain_id}, body={'domain': ref}) unsafe_name = 'i am still not / safe' for config_setting in ['new', 'strict']: self.config_fixture.config(group='resource', domain_name_url_safe=config_setting) ref = unit.new_domain_ref(name=unsafe_name) del ref['id'] self.patch('/domains/%(domain_id)s' % { 'domain_id': self.domain_id}, body={'domain': ref}, expected_status=http_client.BAD_REQUEST) def test_update_domain_unsafe_default(self): """Check default for unsafe names for ``POST /domains``.""" unsafe_name = 'i am not / safe' # By default, we should be able to create unsafe names ref = unit.new_domain_ref(name=unsafe_name) del ref['id'] self.patch('/domains/%(domain_id)s' % { 'domain_id': self.domain_id}, body={'domain': ref}) def test_update_domain_updates_is_domain_project(self): """Check the project that acts as a domain is updated. Call ``PATCH /domains``. """ # Create a new domain domain_ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': domain_ref}) self.assertValidDomainResponse(r, domain_ref) # Disable it self.patch('/domains/%s' % r.result['domain']['id'], body={'domain': {'enabled': False}}) # Retrieve its correspondent project r = self.get('/projects/%(project_id)s' % { 'project_id': r.result['domain']['id']}) self.assertValidProjectResponse(r) # The created project is disabled as well self.assertFalse(r.result['project']['enabled']) def test_disable_domain(self): """Call ``PATCH /domains/{domain_id}`` (set enabled=False).""" # Create a 2nd set of entities in a 2nd domain domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) project2 = unit.new_project_ref(domain_id=domain2['id']) self.resource_api.create_project(project2['id'], project2) user2 = unit.create_user(self.identity_api, domain_id=domain2['id'], project_id=project2['id']) self.assignment_api.add_user_to_project(project2['id'], user2['id']) # First check a user in that domain can authenticate.. 
body = { 'auth': { 'passwordCredentials': { 'userId': user2['id'], 'password': user2['password'] }, 'tenantId': project2['id'] } } self.admin_request( path='/v2.0/tokens', method='POST', body=body) auth_data = self.build_authentication_request( user_id=user2['id'], password=user2['password'], project_id=project2['id']) self.v3_create_token(auth_data) # Now disable the domain domain2['enabled'] = False r = self.patch('/domains/%(domain_id)s' % { 'domain_id': domain2['id']}, body={'domain': {'enabled': False}}) self.assertValidDomainResponse(r, domain2) # Make sure the user can no longer authenticate, via # either API body = { 'auth': { 'passwordCredentials': { 'userId': user2['id'], 'password': user2['password'] }, 'tenantId': project2['id'] } } self.admin_request( path='/v2.0/tokens', method='POST', body=body, expected_status=http_client.UNAUTHORIZED) # Try looking up in v3 by name and id auth_data = self.build_authentication_request( user_id=user2['id'], password=user2['password'], project_id=project2['id']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) auth_data = self.build_authentication_request( username=user2['name'], user_domain_id=domain2['id'], password=user2['password'], project_id=project2['id']) self.v3_create_token(auth_data, expected_status=http_client.UNAUTHORIZED) def test_delete_enabled_domain_fails(self): """Call ``DELETE /domains/{domain_id}`` (when domain enabled).""" # Try deleting an enabled domain, which should fail self.delete('/domains/%(domain_id)s' % { 'domain_id': self.domain['id']}, expected_status=exception.ForbiddenAction.code) def test_delete_domain(self): """Call ``DELETE /domains/{domain_id}``. The sample data set up already has a user and project that is part of self.domain. Additionally we will create a group and a credential within it. Since the user we will authenticate with is in this domain, we create a another set of entities in a second domain. 
Deleting this second domain should delete all these new entities. In addition, all the entities in the regular self.domain should be unaffected by the delete. Test Plan: - Create domain2 and a 2nd set of entities - Disable domain2 - Delete domain2 - Check entities in domain2 have been deleted - Check entities in self.domain are unaffected """ # Create a group and a credential in the main domain group = unit.new_group_ref(domain_id=self.domain_id) group = self.identity_api.create_group(group) credential = unit.new_credential_ref(user_id=self.user['id'], project_id=self.project_id) self.credential_api.create_credential(credential['id'], credential) # Create a 2nd set of entities in a 2nd domain domain2 = unit.new_domain_ref() self.resource_api.create_domain(domain2['id'], domain2) project2 = unit.new_project_ref(domain_id=domain2['id']) project2 = self.resource_api.create_project(project2['id'], project2) user2 = unit.new_user_ref(domain_id=domain2['id'], project_id=project2['id']) user2 = self.identity_api.create_user(user2) group2 = unit.new_group_ref(domain_id=domain2['id']) group2 = self.identity_api.create_group(group2) credential2 = unit.new_credential_ref(user_id=user2['id'], project_id=project2['id']) self.credential_api.create_credential(credential2['id'], credential2) # Now disable the new domain and delete it domain2['enabled'] = False r = self.patch('/domains/%(domain_id)s' % { 'domain_id': domain2['id']}, body={'domain': {'enabled': False}}) self.assertValidDomainResponse(r, domain2) self.delete('/domains/%(domain_id)s' % {'domain_id': domain2['id']}) # Check all the domain2 relevant entities are gone self.assertRaises(exception.DomainNotFound, self.resource_api.get_domain, domain2['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project2['id']) self.assertRaises(exception.GroupNotFound, self.identity_api.get_group, group2['id']) self.assertRaises(exception.UserNotFound, self.identity_api.get_user, user2['id']) 
self.assertRaises(exception.CredentialNotFound, self.credential_api.get_credential, credential2['id']) # ...and that all self.domain entities are still here r = self.resource_api.get_domain(self.domain['id']) self.assertDictEqual(self.domain, r) r = self.resource_api.get_project(self.project['id']) self.assertDictEqual(self.project, r) r = self.identity_api.get_group(group['id']) self.assertDictEqual(group, r) r = self.identity_api.get_user(self.user['id']) self.user.pop('password') self.assertDictEqual(self.user, r) r = self.credential_api.get_credential(credential['id']) self.assertDictEqual(credential, r) def test_delete_domain_deletes_is_domain_project(self): """Check the project that acts as a domain is deleted. Call ``DELETE /domains``. """ # Create a new domain domain_ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': domain_ref}) self.assertValidDomainResponse(r, domain_ref) # Retrieve its correspondent project self.get('/projects/%(project_id)s' % { 'project_id': r.result['domain']['id']}) # Delete the domain self.patch('/domains/%s' % r.result['domain']['id'], body={'domain': {'enabled': False}}) self.delete('/domains/%s' % r.result['domain']['id']) # The created project is deleted as well self.get('/projects/%(project_id)s' % { 'project_id': r.result['domain']['id']}, expected_status=404) def test_delete_default_domain(self): # Need to disable it first. self.patch('/domains/%(domain_id)s' % { 'domain_id': CONF.identity.default_domain_id}, body={'domain': {'enabled': False}}) self.delete( '/domains/%(domain_id)s' % { 'domain_id': CONF.identity.default_domain_id}) def test_token_revoked_once_domain_disabled(self): """Test token from a disabled domain has been invalidated. Test that a token that was valid for an enabled domain becomes invalid once that domain is disabled. 
""" domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user2 = unit.create_user(self.identity_api, domain_id=domain['id']) # build a request body auth_body = self.build_authentication_request( user_id=user2['id'], password=user2['password']) # sends a request for the user's token token_resp = self.post('/auth/tokens', body=auth_body) subject_token = token_resp.headers.get('x-subject-token') # validates the returned token and it should be valid. self.head('/auth/tokens', headers={'x-subject-token': subject_token}, expected_status=http_client.OK) # now disable the domain domain['enabled'] = False url = "/domains/%(domain_id)s" % {'domain_id': domain['id']} self.patch(url, body={'domain': {'enabled': False}}) # validates the same token again and it should be 'not found' # as the domain has already been disabled. self.head('/auth/tokens', headers={'x-subject-token': subject_token}, expected_status=http_client.NOT_FOUND) def test_delete_domain_hierarchy(self): """Call ``DELETE /domains/{domain_id}``.""" domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) root_project = unit.new_project_ref(domain_id=domain['id']) root_project = self.resource_api.create_project(root_project['id'], root_project) leaf_project = unit.new_project_ref( domain_id=domain['id'], parent_id=root_project['id']) self.resource_api.create_project(leaf_project['id'], leaf_project) # Need to disable it first. 
self.patch('/domains/%(domain_id)s' % { 'domain_id': domain['id']}, body={'domain': {'enabled': False}}) self.delete( '/domains/%(domain_id)s' % { 'domain_id': domain['id']}) self.assertRaises(exception.DomainNotFound, self.resource_api.get_domain, domain['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, root_project['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, leaf_project['id']) def test_forbid_operations_on_federated_domain(self): """Make sure one cannot operate on federated domain. This includes operations like create, update, delete on domain identified by id and name where difference variations of id 'Federated' are used. """ def create_domains(): for variation in ('Federated', 'FEDERATED', 'federated', 'fEderated'): domain = unit.new_domain_ref() domain['id'] = variation yield domain for domain in create_domains(): self.assertRaises( AssertionError, self.resource_api.create_domain, domain['id'], domain) self.assertRaises( AssertionError, self.resource_api.update_domain, domain['id'], domain) self.assertRaises( exception.DomainNotFound, self.resource_api.delete_domain, domain['id']) # swap 'name' with 'id' and try again, expecting the request to # gracefully fail domain['id'], domain['name'] = domain['name'], domain['id'] self.assertRaises( AssertionError, self.resource_api.create_domain, domain['id'], domain) self.assertRaises( AssertionError, self.resource_api.update_domain, domain['id'], domain) self.assertRaises( exception.DomainNotFound, self.resource_api.delete_domain, domain['id']) def test_forbid_operations_on_defined_federated_domain(self): """Make sure one cannot operate on a user-defined federated domain. This includes operations like create, update, delete. 
""" non_default_name = 'beta_federated_domain' self.config_fixture.config(group='federation', federated_domain_name=non_default_name) domain = unit.new_domain_ref(name=non_default_name) self.assertRaises(AssertionError, self.resource_api.create_domain, domain['id'], domain) self.assertRaises(exception.DomainNotFound, self.resource_api.delete_domain, domain['id']) self.assertRaises(AssertionError, self.resource_api.update_domain, domain['id'], domain) # Project CRUD tests def test_list_projects(self): """Call ``GET /projects``.""" resource_url = '/projects' r = self.get(resource_url) self.assertValidProjectListResponse(r, ref=self.project, resource_url=resource_url) def test_create_project(self): """Call ``POST /projects``.""" ref = unit.new_project_ref(domain_id=self.domain_id) r = self.post( '/projects', body={'project': ref}) self.assertValidProjectResponse(r, ref) def test_create_project_bad_request(self): """Call ``POST /projects``.""" self.post('/projects', body={'project': {}}, expected_status=http_client.BAD_REQUEST) def test_create_project_invalid_domain_id(self): """Call ``POST /projects``.""" ref = unit.new_project_ref(domain_id=uuid.uuid4().hex) self.post('/projects', body={'project': ref}, expected_status=http_client.BAD_REQUEST) def test_create_project_unsafe(self): """Call ``POST /projects with unsafe names``.""" unsafe_name = 'i am not / safe' self.config_fixture.config(group='resource', project_name_url_safe='off') ref = unit.new_project_ref(name=unsafe_name) self.post( '/projects', body={'project': ref}) for config_setting in ['new', 'strict']: self.config_fixture.config(group='resource', project_name_url_safe=config_setting) ref = unit.new_project_ref(name=unsafe_name) self.post( '/projects', body={'project': ref}, expected_status=http_client.BAD_REQUEST) def test_create_project_unsafe_default(self): """Check default for unsafe names for ``POST /projects``.""" unsafe_name = 'i am not / safe' # By default, we should be able to create unsafe names 
ref = unit.new_project_ref(name=unsafe_name) self.post( '/projects', body={'project': ref}) def test_create_project_with_parent_id_none_and_domain_id_none(self): """Call ``POST /projects``.""" # Grant a domain role for the user collection_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': self.domain_id, 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url) # Create an authentication request for a domain scoped token auth = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain_id) # Without parent_id and domain_id passed as None, the domain_id should # be normalized to the domain on the token, when using a domain # scoped token. ref = unit.new_project_ref() r = self.post( '/projects', auth=auth, body={'project': ref}) ref['domain_id'] = self.domain['id'] self.assertValidProjectResponse(r, ref) def test_create_project_without_parent_id_and_without_domain_id(self): """Call ``POST /projects``.""" # Grant a domain role for the user collection_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': self.domain_id, 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url) # Create an authentication request for a domain scoped token auth = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain_id) # Without domain_id and parent_id, the domain_id should be # normalized to the domain on the token, when using a domain # scoped token. 
ref = unit.new_project_ref() r = self.post( '/projects', auth=auth, body={'project': ref}) ref['domain_id'] = self.domain['id'] self.assertValidProjectResponse(r, ref) @test_utils.wip('waiting for support for parent_id to imply domain_id') def test_create_project_with_parent_id_and_no_domain_id(self): """Call ``POST /projects``.""" # With only the parent_id, the domain_id should be # normalized to the parent's domain_id ref_child = unit.new_project_ref(parent_id=self.project['id']) r = self.post( '/projects', body={'project': ref_child}) self.assertEqual(r.result['project']['domain_id'], self.project['domain_id']) ref_child['domain_id'] = self.domain['id'] self.assertValidProjectResponse(r, ref_child) def _create_projects_hierarchy(self, hierarchy_size=1): """Creates a single-branched project hierarchy with the specified size. :param hierarchy_size: the desired hierarchy size, default is 1 - a project with one child. :returns projects: a list of the projects in the created hierarchy. """ new_ref = unit.new_project_ref(domain_id=self.domain_id) resp = self.post('/projects', body={'project': new_ref}) projects = [resp.result] for i in range(hierarchy_size): new_ref = unit.new_project_ref( domain_id=self.domain_id, parent_id=projects[i]['project']['id']) resp = self.post('/projects', body={'project': new_ref}) self.assertValidProjectResponse(resp, new_ref) projects.append(resp.result) return projects def test_list_projects_filtering_by_parent_id(self): """Call ``GET /projects?parent_id={project_id}``.""" projects = self._create_projects_hierarchy(hierarchy_size=2) # Add another child to projects[1] - it will be projects[3] new_ref = unit.new_project_ref( domain_id=self.domain_id, parent_id=projects[1]['project']['id']) resp = self.post('/projects', body={'project': new_ref}) self.assertValidProjectResponse(resp, new_ref) projects.append(resp.result) # Query for projects[0] immediate children - it will # be only projects[1] r = self.get( 
'/projects?parent_id=%(project_id)s' % { 'project_id': projects[0]['project']['id']}) self.assertValidProjectListResponse(r) projects_result = r.result['projects'] expected_list = [projects[1]['project']] # projects[0] has projects[1] as child self.assertEqual(expected_list, projects_result) # Query for projects[1] immediate children - it will # be projects[2] and projects[3] r = self.get( '/projects?parent_id=%(project_id)s' % { 'project_id': projects[1]['project']['id']}) self.assertValidProjectListResponse(r) projects_result = r.result['projects'] expected_list = [projects[2]['project'], projects[3]['project']] # projects[1] has projects[2] and projects[3] as children self.assertEqual(expected_list, projects_result) # Query for projects[2] immediate children - it will be an empty list r = self.get( '/projects?parent_id=%(project_id)s' % { 'project_id': projects[2]['project']['id']}) self.assertValidProjectListResponse(r) projects_result = r.result['projects'] expected_list = [] # projects[2] has no child, projects_result must be an empty list self.assertEqual(expected_list, projects_result) def test_create_hierarchical_project(self): """Call ``POST /projects``.""" self._create_projects_hierarchy() def test_get_project(self): """Call ``GET /projects/{project_id}``.""" r = self.get( '/projects/%(project_id)s' % { 'project_id': self.project_id}) self.assertValidProjectResponse(r, self.project) def test_get_project_with_parents_as_list_with_invalid_id(self): """Call ``GET /projects/{project_id}?parents_as_list``.""" self.get('/projects/%(project_id)s?parents_as_list' % { 'project_id': None}, expected_status=http_client.NOT_FOUND) self.get('/projects/%(project_id)s?parents_as_list' % { 'project_id': uuid.uuid4().hex}, expected_status=http_client.NOT_FOUND) def test_get_project_with_subtree_as_list_with_invalid_id(self): """Call ``GET /projects/{project_id}?subtree_as_list``.""" self.get('/projects/%(project_id)s?subtree_as_list' % { 'project_id': None}, 
expected_status=http_client.NOT_FOUND) self.get('/projects/%(project_id)s?subtree_as_list' % { 'project_id': uuid.uuid4().hex}, expected_status=http_client.NOT_FOUND) def test_get_project_with_parents_as_ids(self): """Call ``GET /projects/{project_id}?parents_as_ids``.""" projects = self._create_projects_hierarchy(hierarchy_size=2) # Query for projects[2] parents_as_ids r = self.get( '/projects/%(project_id)s?parents_as_ids' % { 'project_id': projects[2]['project']['id']}) self.assertValidProjectResponse(r, projects[2]['project']) parents_as_ids = r.result['project']['parents'] # Assert parents_as_ids is a structured dictionary correctly # representing the hierarchy. The request was made using projects[2] # id, hence its parents should be projects[1], projects[0] and the # is_domain_project, which is the root of the hierarchy. It should # have the following structure: # { # projects[1]: { # projects[0]: { # is_domain_project: None # } # } # } is_domain_project_id = projects[0]['project']['domain_id'] expected_dict = { projects[1]['project']['id']: { projects[0]['project']['id']: {is_domain_project_id: None} } } self.assertDictEqual(expected_dict, parents_as_ids) # Query for projects[0] parents_as_ids r = self.get( '/projects/%(project_id)s?parents_as_ids' % { 'project_id': projects[0]['project']['id']}) self.assertValidProjectResponse(r, projects[0]['project']) parents_as_ids = r.result['project']['parents'] # projects[0] has only the project that acts as a domain as parent expected_dict = { is_domain_project_id: None } self.assertDictEqual(expected_dict, parents_as_ids) # Query for is_domain_project parents_as_ids r = self.get( '/projects/%(project_id)s?parents_as_ids' % { 'project_id': is_domain_project_id}) parents_as_ids = r.result['project']['parents'] # the project that acts as a domain has no parents, parents_as_ids # must be None self.assertIsNone(parents_as_ids) def test_get_project_with_parents_as_list_with_full_access(self): """``GET 
/projects/{project_id}?parents_as_list`` with full access. Test plan: - Create 'parent', 'project' and 'subproject' projects; - Assign a user a role on each one of those projects; - Check that calling parents_as_list on 'subproject' returns both 'project' and 'parent'. """ # Create the project hierarchy parent, project, subproject = self._create_projects_hierarchy(2) # Assign a role for the user on all the created projects for proj in (parent, project, subproject): self.put(self.build_role_assignment_link( role_id=self.role_id, user_id=self.user_id, project_id=proj['project']['id'])) # Make the API call r = self.get('/projects/%(project_id)s?parents_as_list' % {'project_id': subproject['project']['id']}) self.assertValidProjectResponse(r, subproject['project']) # Assert only 'project' and 'parent' are in the parents list self.assertIn(project, r.result['project']['parents']) self.assertIn(parent, r.result['project']['parents']) self.assertEqual(2, len(r.result['project']['parents'])) def test_get_project_with_parents_as_list_with_partial_access(self): """``GET /projects/{project_id}?parents_as_list`` with partial access. Test plan: - Create 'parent', 'project' and 'subproject' projects; - Assign a user a role on 'parent' and 'subproject'; - Check that calling parents_as_list on 'subproject' only returns 'parent'. 
""" # Create the project hierarchy parent, project, subproject = self._create_projects_hierarchy(2) # Assign a role for the user on parent and subproject for proj in (parent, subproject): self.put(self.build_role_assignment_link( role_id=self.role_id, user_id=self.user_id, project_id=proj['project']['id'])) # Make the API call r = self.get('/projects/%(project_id)s?parents_as_list' % {'project_id': subproject['project']['id']}) self.assertValidProjectResponse(r, subproject['project']) # Assert only 'parent' is in the parents list self.assertIn(parent, r.result['project']['parents']) self.assertEqual(1, len(r.result['project']['parents'])) def test_get_project_with_parents_as_list_and_parents_as_ids(self): """Attempt to list a project's parents as both a list and as IDs. This uses ``GET /projects/{project_id}?parents_as_list&parents_as_ids`` which should fail with a Bad Request due to the conflicting query strings. """ projects = self._create_projects_hierarchy(hierarchy_size=2) self.get( '/projects/%(project_id)s?parents_as_list&parents_as_ids' % { 'project_id': projects[1]['project']['id']}, expected_status=http_client.BAD_REQUEST) def test_list_project_is_domain_filter(self): """Call ``GET /projects?is_domain=True/False``.""" # Get the initial number of projects, both acting as a domain as well # as regular. 
r = self.get('/projects?is_domain=True', expected_status=200) initial_number_is_domain_true = len(r.result['projects']) r = self.get('/projects?is_domain=False', expected_status=200) initial_number_is_domain_false = len(r.result['projects']) # Add some more projects acting as domains new_is_domain_project = unit.new_project_ref(is_domain=True) new_is_domain_project = self.resource_api.create_project( new_is_domain_project['id'], new_is_domain_project) new_is_domain_project2 = unit.new_project_ref(is_domain=True) new_is_domain_project2 = self.resource_api.create_project( new_is_domain_project2['id'], new_is_domain_project2) number_is_domain_true = initial_number_is_domain_true + 2 r = self.get('/projects?is_domain=True', expected_status=200) self.assertThat(r.result['projects'], matchers.HasLength(number_is_domain_true)) self.assertIn(new_is_domain_project['id'], [p['id'] for p in r.result['projects']]) self.assertIn(new_is_domain_project2['id'], [p['id'] for p in r.result['projects']]) # Now add a regular project new_regular_project = unit.new_project_ref(domain_id=self.domain_id) new_regular_project = self.resource_api.create_project( new_regular_project['id'], new_regular_project) number_is_domain_false = initial_number_is_domain_false + 1 # Check we still have the same number of projects acting as domains r = self.get('/projects?is_domain=True', expected_status=200) self.assertThat(r.result['projects'], matchers.HasLength(number_is_domain_true)) # Check the number of regular projects is correct r = self.get('/projects?is_domain=False', expected_status=200) self.assertThat(r.result['projects'], matchers.HasLength(number_is_domain_false)) self.assertIn(new_regular_project['id'], [p['id'] for p in r.result['projects']]) def test_list_project_is_domain_filter_default(self): """Default project list should not see projects acting as domains""" # Get the initial count of regular projects r = self.get('/projects?is_domain=False', expected_status=200) 
number_is_domain_false = len(r.result['projects']) # Make sure we have at least one project acting as a domain new_is_domain_project = unit.new_project_ref(is_domain=True) new_is_domain_project = self.resource_api.create_project( new_is_domain_project['id'], new_is_domain_project) r = self.get('/projects', expected_status=200) self.assertThat(r.result['projects'], matchers.HasLength(number_is_domain_false)) self.assertNotIn(new_is_domain_project, r.result['projects']) def test_get_project_with_subtree_as_ids(self): """Call ``GET /projects/{project_id}?subtree_as_ids``. This test creates a more complex hierarchy to test if the structured dictionary returned by using the ``subtree_as_ids`` query param correctly represents the hierarchy. The hierarchy contains 5 projects with the following structure:: +--A--+ | | +--B--+ C | | D E """ projects = self._create_projects_hierarchy(hierarchy_size=2) # Add another child to projects[0] - it will be projects[3] new_ref = unit.new_project_ref( domain_id=self.domain_id, parent_id=projects[0]['project']['id']) resp = self.post('/projects', body={'project': new_ref}) self.assertValidProjectResponse(resp, new_ref) projects.append(resp.result) # Add another child to projects[1] - it will be projects[4] new_ref = unit.new_project_ref( domain_id=self.domain_id, parent_id=projects[1]['project']['id']) resp = self.post('/projects', body={'project': new_ref}) self.assertValidProjectResponse(resp, new_ref) projects.append(resp.result) # Query for projects[0] subtree_as_ids r = self.get( '/projects/%(project_id)s?subtree_as_ids' % { 'project_id': projects[0]['project']['id']}) self.assertValidProjectResponse(r, projects[0]['project']) subtree_as_ids = r.result['project']['subtree'] # The subtree hierarchy from projects[0] should have the following # structure: # { # projects[1]: { # projects[2]: None, # projects[4]: None # }, # projects[3]: None # } expected_dict = { projects[1]['project']['id']: { projects[2]['project']['id']: None, 
projects[4]['project']['id']: None }, projects[3]['project']['id']: None } self.assertDictEqual(expected_dict, subtree_as_ids) # Now query for projects[1] subtree_as_ids r = self.get( '/projects/%(project_id)s?subtree_as_ids' % { 'project_id': projects[1]['project']['id']}) self.assertValidProjectResponse(r, projects[1]['project']) subtree_as_ids = r.result['project']['subtree'] # The subtree hierarchy from projects[1] should have the following # structure: # { # projects[2]: None, # projects[4]: None # } expected_dict = { projects[2]['project']['id']: None, projects[4]['project']['id']: None } self.assertDictEqual(expected_dict, subtree_as_ids) # Now query for projects[3] subtree_as_ids r = self.get( '/projects/%(project_id)s?subtree_as_ids' % { 'project_id': projects[3]['project']['id']}) self.assertValidProjectResponse(r, projects[3]['project']) subtree_as_ids = r.result['project']['subtree'] # projects[3] has no subtree, subtree_as_ids must be None self.assertIsNone(subtree_as_ids) def test_get_project_with_subtree_as_list_with_full_access(self): """``GET /projects/{project_id}?subtree_as_list`` with full access. Test plan: - Create 'parent', 'project' and 'subproject' projects; - Assign a user a role on each one of those projects; - Check that calling subtree_as_list on 'parent' returns both 'parent' and 'subproject'. 
""" # Create the project hierarchy parent, project, subproject = self._create_projects_hierarchy(2) # Assign a role for the user on all the created projects for proj in (parent, project, subproject): self.put(self.build_role_assignment_link( role_id=self.role_id, user_id=self.user_id, project_id=proj['project']['id'])) # Make the API call r = self.get('/projects/%(project_id)s?subtree_as_list' % {'project_id': parent['project']['id']}) self.assertValidProjectResponse(r, parent['project']) # Assert only 'project' and 'subproject' are in the subtree self.assertIn(project, r.result['project']['subtree']) self.assertIn(subproject, r.result['project']['subtree']) self.assertEqual(2, len(r.result['project']['subtree'])) def test_get_project_with_subtree_as_list_with_partial_access(self): """``GET /projects/{project_id}?subtree_as_list`` with partial access. Test plan: - Create 'parent', 'project' and 'subproject' projects; - Assign a user a role on 'parent' and 'subproject'; - Check that calling subtree_as_list on 'parent' returns 'subproject'. """ # Create the project hierarchy parent, project, subproject = self._create_projects_hierarchy(2) # Assign a role for the user on parent and subproject for proj in (parent, subproject): self.put(self.build_role_assignment_link( role_id=self.role_id, user_id=self.user_id, project_id=proj['project']['id'])) # Make the API call r = self.get('/projects/%(project_id)s?subtree_as_list' % {'project_id': parent['project']['id']}) self.assertValidProjectResponse(r, parent['project']) # Assert only 'subproject' is in the subtree self.assertIn(subproject, r.result['project']['subtree']) self.assertEqual(1, len(r.result['project']['subtree'])) def test_get_project_with_subtree_as_list_and_subtree_as_ids(self): """Attempt to get a project subtree as both a list and as IDs. This uses ``GET /projects/{project_id}?subtree_as_list&subtree_as_ids`` which should fail with a bad request due to the conflicting query strings. 
""" projects = self._create_projects_hierarchy(hierarchy_size=2) self.get( '/projects/%(project_id)s?subtree_as_list&subtree_as_ids' % { 'project_id': projects[1]['project']['id']}, expected_status=http_client.BAD_REQUEST) def test_update_project(self): """Call ``PATCH /projects/{project_id}``.""" ref = unit.new_project_ref(domain_id=self.domain_id, parent_id=self.project['parent_id']) del ref['id'] r = self.patch( '/projects/%(project_id)s' % { 'project_id': self.project_id}, body={'project': ref}) self.assertValidProjectResponse(r, ref) def test_update_project_unsafe(self): """Call ``POST /projects/{project_id} with unsafe names``.""" unsafe_name = 'i am not / safe' self.config_fixture.config(group='resource', project_name_url_safe='off') ref = unit.new_project_ref(name=unsafe_name, domain_id=self.domain_id, parent_id=self.project['parent_id']) del ref['id'] self.patch( '/projects/%(project_id)s' % { 'project_id': self.project_id}, body={'project': ref}) unsafe_name = 'i am still not / safe' for config_setting in ['new', 'strict']: self.config_fixture.config(group='resource', project_name_url_safe=config_setting) ref = unit.new_project_ref(name=unsafe_name, domain_id=self.domain_id, parent_id=self.project['parent_id']) del ref['id'] self.patch( '/projects/%(project_id)s' % { 'project_id': self.project_id}, body={'project': ref}, expected_status=http_client.BAD_REQUEST) def test_update_project_unsafe_default(self): """Check default for unsafe names for ``POST /projects``.""" unsafe_name = 'i am not / safe' # By default, we should be able to create unsafe names ref = unit.new_project_ref(name=unsafe_name, domain_id=self.domain_id, parent_id=self.project['parent_id']) del ref['id'] self.patch( '/projects/%(project_id)s' % { 'project_id': self.project_id}, body={'project': ref}) def test_update_project_domain_id(self): """Call ``PATCH /projects/{project_id}`` with domain_id.""" project = unit.new_project_ref(domain_id=self.domain['id']) project = 
self.resource_api.create_project(project['id'], project) project['domain_id'] = CONF.identity.default_domain_id r = self.patch('/projects/%(project_id)s' % { 'project_id': project['id']}, body={'project': project}, expected_status=exception.ValidationError.code) self.config_fixture.config(domain_id_immutable=False) project['domain_id'] = self.domain['id'] r = self.patch('/projects/%(project_id)s' % { 'project_id': project['id']}, body={'project': project}) self.assertValidProjectResponse(r, project) def test_update_project_parent_id(self): """Call ``PATCH /projects/{project_id}``.""" projects = self._create_projects_hierarchy() leaf_project = projects[1]['project'] leaf_project['parent_id'] = None self.patch( '/projects/%(project_id)s' % { 'project_id': leaf_project['id']}, body={'project': leaf_project}, expected_status=http_client.FORBIDDEN) def test_update_project_is_domain_not_allowed(self): """Call ``PATCH /projects/{project_id}`` with is_domain. The is_domain flag is immutable. """ project = unit.new_project_ref(domain_id=self.domain['id']) resp = self.post('/projects', body={'project': project}) self.assertFalse(resp.result['project']['is_domain']) project['parent_id'] = resp.result['project']['parent_id'] project['is_domain'] = True self.patch('/projects/%(project_id)s' % { 'project_id': resp.result['project']['id']}, body={'project': project}, expected_status=http_client.BAD_REQUEST) def test_disable_leaf_project(self): """Call ``PATCH /projects/{project_id}``.""" projects = self._create_projects_hierarchy() leaf_project = projects[1]['project'] leaf_project['enabled'] = False r = self.patch( '/projects/%(project_id)s' % { 'project_id': leaf_project['id']}, body={'project': leaf_project}) self.assertEqual( leaf_project['enabled'], r.result['project']['enabled']) def test_disable_not_leaf_project(self): """Call ``PATCH /projects/{project_id}``.""" projects = self._create_projects_hierarchy() root_project = projects[0]['project'] root_project['enabled'] = 
False self.patch( '/projects/%(project_id)s' % { 'project_id': root_project['id']}, body={'project': root_project}, expected_status=http_client.FORBIDDEN) def test_delete_project(self): """Call ``DELETE /projects/{project_id}`` As well as making sure the delete succeeds, we ensure that any credentials that reference this projects are also deleted, while other credentials are unaffected. """ credential = unit.new_credential_ref(user_id=self.user['id'], project_id=self.project_id) self.credential_api.create_credential(credential['id'], credential) # First check the credential for this project is present r = self.credential_api.get_credential(credential['id']) self.assertDictEqual(credential, r) # Create a second credential with a different project project2 = unit.new_project_ref(domain_id=self.domain['id']) self.resource_api.create_project(project2['id'], project2) credential2 = unit.new_credential_ref(user_id=self.user['id'], project_id=project2['id']) self.credential_api.create_credential(credential2['id'], credential2) # Now delete the project self.delete( '/projects/%(project_id)s' % { 'project_id': self.project_id}) # Deleting the project should have deleted any credentials # that reference this project self.assertRaises(exception.CredentialNotFound, self.credential_api.get_credential, credential_id=credential['id']) # But the credential for project2 is unaffected r = self.credential_api.get_credential(credential2['id']) self.assertDictEqual(credential2, r) def test_delete_not_leaf_project(self): """Call ``DELETE /projects/{project_id}``.""" projects = self._create_projects_hierarchy() self.delete( '/projects/%(project_id)s' % { 'project_id': projects[0]['project']['id']}, expected_status=http_client.FORBIDDEN) class ResourceV3toV2MethodsTestCase(unit.TestCase): """Test domain V3 to V2 conversion methods.""" def _setup_initial_projects(self): self.project_id = uuid.uuid4().hex self.domain_id = CONF.identity.default_domain_id self.parent_id = uuid.uuid4().hex # 
Project with only domain_id in ref self.project1 = unit.new_project_ref(id=self.project_id, name=self.project_id, domain_id=self.domain_id) # Project with both domain_id and parent_id in ref self.project2 = unit.new_project_ref(id=self.project_id, name=self.project_id, domain_id=self.domain_id, parent_id=self.parent_id) # Project with no domain_id and parent_id in ref self.project3 = unit.new_project_ref(id=self.project_id, name=self.project_id, domain_id=self.domain_id, parent_id=self.parent_id) # Expected result with no domain_id and parent_id self.expected_project = {'id': self.project_id, 'name': self.project_id} def test_v2controller_filter_domain_id(self): # V2.0 is not domain aware, ensure domain_id is popped off the ref. other_data = uuid.uuid4().hex domain_id = CONF.identity.default_domain_id ref = {'domain_id': domain_id, 'other_data': other_data} ref_no_domain = {'other_data': other_data} expected_ref = ref_no_domain.copy() updated_ref = controller.V2Controller.filter_domain_id(ref) self.assertIs(ref, updated_ref) self.assertDictEqual(expected_ref, ref) # Make sure we don't error/muck up data if domain_id isn't present updated_ref = controller.V2Controller.filter_domain_id(ref_no_domain) self.assertIs(ref_no_domain, updated_ref) self.assertDictEqual(expected_ref, ref_no_domain) def test_v3controller_filter_domain_id(self): # No data should be filtered out in this case. 
other_data = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref = {'domain_id': domain_id, 'other_data': other_data} expected_ref = ref.copy() updated_ref = controller.V3Controller.filter_domain_id(ref) self.assertIs(ref, updated_ref) self.assertDictEqual(expected_ref, ref) def test_v2controller_filter_domain(self): other_data = uuid.uuid4().hex domain_id = uuid.uuid4().hex non_default_domain_ref = {'domain': {'id': domain_id}, 'other_data': other_data} default_domain_ref = {'domain': {'id': 'default'}, 'other_data': other_data} updated_ref = controller.V2Controller.filter_domain(default_domain_ref) self.assertNotIn('domain', updated_ref) self.assertNotIn( 'domain', controller.V2Controller.filter_domain(non_default_domain_ref)) def test_v2controller_filter_project_parent_id(self): # V2.0 is not project hierarchy aware, ensure parent_id is popped off. other_data = uuid.uuid4().hex parent_id = uuid.uuid4().hex ref = {'parent_id': parent_id, 'other_data': other_data} ref_no_parent = {'other_data': other_data} expected_ref = ref_no_parent.copy() updated_ref = controller.V2Controller.filter_project_parent_id(ref) self.assertIs(ref, updated_ref) self.assertDictEqual(expected_ref, ref) # Make sure we don't error/muck up data if parent_id isn't present updated_ref = controller.V2Controller.filter_project_parent_id( ref_no_parent) self.assertIs(ref_no_parent, updated_ref) self.assertDictEqual(expected_ref, ref_no_parent) def test_v3_to_v2_project_method(self): self._setup_initial_projects() # TODO(shaleh): these optional fields are not handled well by the # v3_to_v2 code. Manually remove them for now. 
Eventually update # new_project_ref to not return optional values del self.project1['enabled'] del self.project1['description'] del self.project2['enabled'] del self.project2['description'] del self.project3['enabled'] del self.project3['description'] updated_project1 = controller.V2Controller.v3_to_v2_project( self.project1) self.assertIs(self.project1, updated_project1) self.assertDictEqual(self.expected_project, self.project1) updated_project2 = controller.V2Controller.v3_to_v2_project( self.project2) self.assertIs(self.project2, updated_project2) self.assertDictEqual(self.expected_project, self.project2) updated_project3 = controller.V2Controller.v3_to_v2_project( self.project3) self.assertIs(self.project3, updated_project3) self.assertDictEqual(self.expected_project, self.project2) def test_v3_to_v2_project_method_list(self): self._setup_initial_projects() project_list = [self.project1, self.project2, self.project3] # TODO(shaleh): these optional fields are not handled well by the # v3_to_v2 code. Manually remove them for now. Eventually update # new_project_ref to not return optional values for p in project_list: del p['enabled'] del p['description'] updated_list = controller.V2Controller.v3_to_v2_project(project_list) self.assertEqual(len(updated_list), len(project_list)) for i, ref in enumerate(updated_list): # Order should not change. self.assertIs(ref, project_list[i]) self.assertDictEqual(self.expected_project, self.project1) self.assertDictEqual(self.expected_project, self.project2) self.assertDictEqual(self.expected_project, self.project3) keystone-9.0.0/keystone/tests/unit/test_ipv6.py0000664000567000056710000000330312701407102022772 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from keystone.common import environment from keystone.tests import unit from keystone.tests.unit.ksfixtures import appserver CONF = cfg.CONF class IPv6TestCase(unit.TestCase): def setUp(self): self.skip_if_no_ipv6() super(IPv6TestCase, self).setUp() self.load_backends() def test_ipv6_ok(self): """Make sure both public and admin API work with ipv6.""" paste_conf = self._paste_config('keystone') # Verify Admin with appserver.AppServer(paste_conf, appserver.ADMIN, host="::1"): conn = environment.httplib.HTTPConnection( '::1', CONF.eventlet_server.admin_port) conn.request('GET', '/') resp = conn.getresponse() self.assertEqual(300, resp.status) # Verify Public with appserver.AppServer(paste_conf, appserver.MAIN, host="::1"): conn = environment.httplib.HTTPConnection( '::1', CONF.eventlet_server.public_port) conn.request('GET', '/') resp = conn.getresponse() self.assertEqual(300, resp.status) keystone-9.0.0/keystone/tests/unit/filtering.py0000664000567000056710000001115612701407102023037 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import cfg from six.moves import range CONF = cfg.CONF class FilterTests(object): # Provide support for checking if a batch of list items all # exist within a contiguous range in a total list def _match_with_list(self, this_batch, total_list, batch_size=None, list_start=None, list_end=None): if batch_size is None: batch_size = len(this_batch) if list_start is None: list_start = 0 if list_end is None: list_end = len(total_list) for batch_item in range(0, batch_size): found = False for list_item in range(list_start, list_end): if this_batch[batch_item]['id'] == total_list[list_item]['id']: found = True self.assertTrue(found) def _create_entity(self, entity_type): """Find the create_ method. Searches through the [identity_api, resource_api, assignment_api] managers for a method called create_ and returns the first one. """ f = getattr(self.identity_api, 'create_%s' % entity_type, None) if f is None: f = getattr(self.resource_api, 'create_%s' % entity_type, None) if f is None: f = getattr(self.assignment_api, 'create_%s' % entity_type) return f def _delete_entity(self, entity_type): """Find the delete_ method. Searches through the [identity_api, resource_api, assignment_api] managers for a method called delete_ and returns the first one. """ f = getattr(self.identity_api, 'delete_%s' % entity_type, None) if f is None: f = getattr(self.resource_api, 'delete_%s' % entity_type, None) if f is None: f = getattr(self.assignment_api, 'delete_%s' % entity_type) return f def _list_entities(self, entity_type): """Find the list_ method. Searches through the [identity_api, resource_api, assignment_api] managers for a method called list_ and returns the first one. 
""" f = getattr(self.identity_api, 'list_%ss' % entity_type, None) if f is None: f = getattr(self.resource_api, 'list_%ss' % entity_type, None) if f is None: f = getattr(self.assignment_api, 'list_%ss' % entity_type) return f def _create_one_entity(self, entity_type, domain_id, name): new_entity = {'name': name, 'domain_id': domain_id} if entity_type in ['user', 'group']: # The manager layer creates the ID for users and groups new_entity = self._create_entity(entity_type)(new_entity) else: new_entity['id'] = '0000' + uuid.uuid4().hex self._create_entity(entity_type)(new_entity['id'], new_entity) return new_entity def _create_test_data(self, entity_type, number, domain_id=None, name_dict=None): """Create entity test data :param entity_type: type of entity to create, e.g. 'user', group' etc. :param number: number of entities to create, :param domain_id: if not defined, all users will be created in the default domain. :param name_dict: optional dict containing entity number and name pairs """ entity_list = [] if domain_id is None: domain_id = CONF.identity.default_domain_id name_dict = name_dict or {} for x in range(number): # If this index has a name defined in the name_dict, then use it name = name_dict.get(x, uuid.uuid4().hex) new_entity = self._create_one_entity(entity_type, domain_id, name) entity_list.append(new_entity) return entity_list def _delete_test_data(self, entity_type, entity_list): for entity in entity_list: self._delete_entity(entity_type)(entity['id']) keystone-9.0.0/keystone/tests/unit/test_sql_livetest.py0000664000567000056710000000334712701407105024637 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.tests import unit from keystone.tests.unit import test_sql_upgrade class PostgresqlMigrateTests(test_sql_upgrade.SqlUpgradeTests): def setUp(self): self.skip_if_env_not_set('ENABLE_LIVE_POSTGRES_TEST') super(PostgresqlMigrateTests, self).setUp() def config_files(self): files = super(PostgresqlMigrateTests, self).config_files() files.append(unit.dirs.tests_conf("backend_postgresql.conf")) return files class MysqlMigrateTests(test_sql_upgrade.SqlUpgradeTests): def setUp(self): self.skip_if_env_not_set('ENABLE_LIVE_MYSQL_TEST') super(MysqlMigrateTests, self).setUp() def config_files(self): files = super(MysqlMigrateTests, self).config_files() files.append(unit.dirs.tests_conf("backend_mysql.conf")) return files class Db2MigrateTests(test_sql_upgrade.SqlUpgradeTests): def setUp(self): self.skip_if_env_not_set('ENABLE_LIVE_DB2_TEST') super(Db2MigrateTests, self).setUp() def config_files(self): files = super(Db2MigrateTests, self).config_files() files.append(unit.dirs.tests_conf("backend_db2.conf")) return files keystone-9.0.0/keystone/tests/unit/test_v3.py0000664000567000056710000017401612701407105022453 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import mock from oslo_config import cfg import oslo_context.context from oslo_serialization import jsonutils from oslo_utils import timeutils from six.moves import http_client from testtools import matchers import webtest from keystone import auth from keystone.common import authorization from keystone.common import cache from keystone.common.validation import validators from keystone import exception from keystone import middleware from keystone.middleware import auth as middleware_auth from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import rest CONF = cfg.CONF DEFAULT_DOMAIN_ID = 'default' TIME_FORMAT = unit.TIME_FORMAT class AuthTestMixin(object): """To hold auth building helper functions.""" def build_auth_scope(self, project_id=None, project_name=None, project_domain_id=None, project_domain_name=None, domain_id=None, domain_name=None, trust_id=None, unscoped=None): scope_data = {} if unscoped: scope_data['unscoped'] = {} if project_id or project_name: scope_data['project'] = {} if project_id: scope_data['project']['id'] = project_id else: scope_data['project']['name'] = project_name if project_domain_id or project_domain_name: project_domain_json = {} if project_domain_id: project_domain_json['id'] = project_domain_id else: project_domain_json['name'] = project_domain_name scope_data['project']['domain'] = project_domain_json if domain_id or domain_name: scope_data['domain'] = {} if domain_id: scope_data['domain']['id'] = domain_id else: scope_data['domain']['name'] = domain_name if 
trust_id: scope_data['OS-TRUST:trust'] = {} scope_data['OS-TRUST:trust']['id'] = trust_id return scope_data def build_password_auth(self, user_id=None, username=None, user_domain_id=None, user_domain_name=None, password=None): password_data = {'user': {}} if user_id: password_data['user']['id'] = user_id else: password_data['user']['name'] = username if user_domain_id or user_domain_name: password_data['user']['domain'] = {} if user_domain_id: password_data['user']['domain']['id'] = user_domain_id else: password_data['user']['domain']['name'] = user_domain_name password_data['user']['password'] = password return password_data def build_token_auth(self, token): return {'id': token} def build_authentication_request(self, token=None, user_id=None, username=None, user_domain_id=None, user_domain_name=None, password=None, kerberos=False, **kwargs): """Build auth dictionary. It will create an auth dictionary based on all the arguments that it receives. """ auth_data = {} auth_data['identity'] = {'methods': []} if kerberos: auth_data['identity']['methods'].append('kerberos') auth_data['identity']['kerberos'] = {} if token: auth_data['identity']['methods'].append('token') auth_data['identity']['token'] = self.build_token_auth(token) if user_id or username: auth_data['identity']['methods'].append('password') auth_data['identity']['password'] = self.build_password_auth( user_id, username, user_domain_id, user_domain_name, password) if kwargs: auth_data['scope'] = self.build_auth_scope(**kwargs) return {'auth': auth_data} class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase, common_auth.AuthTestMixin): def generate_token_schema(self, domain_scoped=False, project_scoped=False): """Return a dictionary of token properties to validate against.""" properties = { 'audit_ids': { 'type': 'array', 'items': { 'type': 'string', }, 'minItems': 1, 'maxItems': 2, }, 'bind': { 'type': 'object', 'properties': { 'kerberos': { 'type': 'string', }, }, 'required': ['kerberos'], 
'additionalProperties': False, }, 'expires_at': {'type': 'string'}, 'issued_at': {'type': 'string'}, 'methods': { 'type': 'array', 'items': { 'type': 'string', }, }, 'user': { 'type': 'object', 'required': ['id', 'name', 'domain'], 'properties': { 'id': {'type': 'string'}, 'name': {'type': 'string'}, 'domain': { 'type': 'object', 'properties': { 'id': {'type': 'string'}, 'name': {'type': 'string'} }, 'required': ['id', 'name'], 'additonalProperties': False, } }, 'additionalProperties': False, } } if domain_scoped: properties['catalog'] = {'type': 'array'} properties['roles'] = { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': {'type': 'string', }, 'name': {'type': 'string', }, }, 'required': ['id', 'name', ], 'additionalProperties': False, }, 'minItems': 1, } properties['domain'] = { 'domain': { 'type': 'object', 'required': ['id', 'name'], 'properties': { 'id': {'type': 'string'}, 'name': {'type': 'string'} }, 'additionalProperties': False } } elif project_scoped: properties['is_admin_project'] = {'type': 'boolean'} properties['catalog'] = {'type': 'array'} properties['roles'] = {'type': 'array'} properties['project'] = { 'type': ['object'], 'required': ['id', 'name', 'domain'], 'properties': { 'id': {'type': 'string'}, 'name': {'type': 'string'}, 'domain': { 'type': ['object'], 'required': ['id', 'name'], 'properties': { 'id': {'type': 'string'}, 'name': {'type': 'string'} }, 'additionalProperties': False } }, 'additionalProperties': False } schema = { 'type': 'object', 'properties': properties, 'required': ['audit_ids', 'expires_at', 'issued_at', 'methods', 'user'], 'optional': ['bind'], 'additionalProperties': False } if domain_scoped: schema['required'].extend(['domain', 'roles']) schema['optional'].append('catalog') elif project_scoped: schema['required'].append('project') schema['optional'].append('bind') schema['optional'].append('catalog') schema['optional'].append('OS-TRUST:trust') schema['optional'].append('is_admin_project') return 
schema def config_files(self): config_files = super(RestfulTestCase, self).config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def get_extensions(self): extensions = set(['revoke']) if hasattr(self, 'EXTENSION_NAME'): extensions.add(self.EXTENSION_NAME) return extensions def generate_paste_config(self): new_paste_file = None try: new_paste_file = unit.generate_paste_config(self.EXTENSION_TO_ADD) except AttributeError: # no need to report this error here, as most tests will not have # EXTENSION_TO_ADD defined. pass finally: return new_paste_file def remove_generated_paste_config(self): try: unit.remove_generated_paste_config(self.EXTENSION_TO_ADD) except AttributeError: pass def setUp(self, app_conf='keystone'): """Setup for v3 Restful Test Cases.""" new_paste_file = self.generate_paste_config() self.addCleanup(self.remove_generated_paste_config) if new_paste_file: app_conf = 'config:%s' % (new_paste_file) super(RestfulTestCase, self).setUp(app_conf=app_conf) self.empty_context = {'environment': {}} def load_backends(self): # ensure the cache region instance is setup cache.configure_cache() super(RestfulTestCase, self).load_backends() def load_fixtures(self, fixtures): self.load_sample_data() def _populate_default_domain(self): if CONF.database.connection == unit.IN_MEM_DB_CONN_STRING: # NOTE(morganfainberg): If an in-memory db is being used, be sure # to populate the default domain, this is typically done by # a migration, but the in-mem db uses model definitions to create # the schema (no migrations are run). 
try: self.resource_api.get_domain(DEFAULT_DOMAIN_ID) except exception.DomainNotFound: domain = unit.new_domain_ref( description=(u'The default domain'), id=DEFAULT_DOMAIN_ID, name=u'Default') self.resource_api.create_domain(DEFAULT_DOMAIN_ID, domain) def load_sample_data(self): self._populate_default_domain() self.domain = unit.new_domain_ref() self.domain_id = self.domain['id'] self.resource_api.create_domain(self.domain_id, self.domain) self.project = unit.new_project_ref(domain_id=self.domain_id) self.project_id = self.project['id'] self.project = self.resource_api.create_project(self.project_id, self.project) self.user = unit.create_user(self.identity_api, domain_id=self.domain_id) self.user_id = self.user['id'] self.default_domain_project_id = uuid.uuid4().hex self.default_domain_project = unit.new_project_ref( domain_id=DEFAULT_DOMAIN_ID) self.default_domain_project['id'] = self.default_domain_project_id self.resource_api.create_project(self.default_domain_project_id, self.default_domain_project) self.default_domain_user = unit.create_user( self.identity_api, domain_id=DEFAULT_DOMAIN_ID) self.default_domain_user_id = self.default_domain_user['id'] # create & grant policy.json's default role for admin_required self.role = unit.new_role_ref(name='admin') self.role_id = self.role['id'] self.role_api.create_role(self.role_id, self.role) self.assignment_api.add_role_to_user_and_project( self.user_id, self.project_id, self.role_id) self.assignment_api.add_role_to_user_and_project( self.default_domain_user_id, self.default_domain_project_id, self.role_id) self.assignment_api.add_role_to_user_and_project( self.default_domain_user_id, self.project_id, self.role_id) # Create "req_admin" user for simulating a real user instead of the # admin_token_auth middleware self.user_reqadmin = unit.create_user(self.identity_api, DEFAULT_DOMAIN_ID) self.assignment_api.add_role_to_user_and_project( self.user_reqadmin['id'], self.default_domain_project_id, self.role_id) self.region 
= unit.new_region_ref() self.region_id = self.region['id'] self.catalog_api.create_region(self.region) self.service = unit.new_service_ref() self.service_id = self.service['id'] self.catalog_api.create_service(self.service_id, self.service.copy()) self.endpoint = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id) self.endpoint_id = self.endpoint['id'] self.catalog_api.create_endpoint(self.endpoint_id, self.endpoint.copy()) # The server adds 'enabled' and defaults to True. self.endpoint['enabled'] = True def create_new_default_project_for_user(self, user_id, domain_id, enable_project=True): ref = unit.new_project_ref(domain_id=domain_id, enabled=enable_project) r = self.post('/projects', body={'project': ref}) project = self.assertValidProjectResponse(r, ref) # set the user's preferred project body = {'user': {'default_project_id': project['id']}} r = self.patch('/users/%(user_id)s' % { 'user_id': user_id}, body=body) self.assertValidUserResponse(r) return project def get_admin_token(self): """Convenience method so that we can test authenticated requests.""" r = self.admin_request( method='POST', path='/v3/auth/tokens', body={ 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': self.user_reqadmin['name'], 'password': self.user_reqadmin['password'], 'domain': { 'id': self.user_reqadmin['domain_id'] } } } }, 'scope': { 'project': { 'id': self.default_domain_project_id, } } } }) return r.headers.get('X-Subject-Token') def get_unscoped_token(self): """Convenience method so that we can test authenticated requests.""" r = self.admin_request( method='POST', path='/v3/auth/tokens', body={ 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': self.user['name'], 'password': self.user['password'], 'domain': { 'id': self.user['domain_id'] } } } } } }) return r.headers.get('X-Subject-Token') def get_scoped_token(self): """Convenience method so that we can test authenticated 
requests.""" r = self.admin_request( method='POST', path='/v3/auth/tokens', body={ 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': self.user['name'], 'password': self.user['password'], 'domain': { 'id': self.user['domain_id'] } } } }, 'scope': { 'project': { 'id': self.project['id'], } } } }) return r.headers.get('X-Subject-Token') def get_domain_scoped_token(self): """Convenience method for requesting domain scoped token.""" r = self.admin_request( method='POST', path='/v3/auth/tokens', body={ 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': self.user['name'], 'password': self.user['password'], 'domain': { 'id': self.user['domain_id'] } } } }, 'scope': { 'domain': { 'id': self.domain['id'], } } } }) return r.headers.get('X-Subject-Token') def get_requested_token(self, auth): """Request the specific token we want.""" r = self.v3_create_token(auth) return r.headers.get('X-Subject-Token') def v3_create_token(self, auth, expected_status=http_client.CREATED): return self.admin_request(method='POST', path='/v3/auth/tokens', body=auth, expected_status=expected_status) def v3_noauth_request(self, path, **kwargs): # request does not require auth token header path = '/v3' + path return self.admin_request(path=path, **kwargs) def v3_request(self, path, **kwargs): # check to see if caller requires token for the API call. 
if kwargs.pop('noauth', None): return self.v3_noauth_request(path, **kwargs) # Check if the caller has passed in auth details for # use in requesting the token auth_arg = kwargs.pop('auth', None) if auth_arg: token = self.get_requested_token(auth_arg) else: token = kwargs.pop('token', None) if not token: token = self.get_scoped_token() path = '/v3' + path return self.admin_request(path=path, token=token, **kwargs) def get(self, path, expected_status=http_client.OK, **kwargs): return self.v3_request(path, method='GET', expected_status=expected_status, **kwargs) def head(self, path, expected_status=http_client.NO_CONTENT, **kwargs): r = self.v3_request(path, method='HEAD', expected_status=expected_status, **kwargs) self.assertEqual(b'', r.body) return r def post(self, path, expected_status=http_client.CREATED, **kwargs): return self.v3_request(path, method='POST', expected_status=expected_status, **kwargs) def put(self, path, expected_status=http_client.NO_CONTENT, **kwargs): return self.v3_request(path, method='PUT', expected_status=expected_status, **kwargs) def patch(self, path, expected_status=http_client.OK, **kwargs): return self.v3_request(path, method='PATCH', expected_status=expected_status, **kwargs) def delete(self, path, expected_status=http_client.NO_CONTENT, **kwargs): return self.v3_request(path, method='DELETE', expected_status=expected_status, **kwargs) def assertValidErrorResponse(self, r): resp = r.result self.assertIsNotNone(resp.get('error')) self.assertIsNotNone(resp['error'].get('code')) self.assertIsNotNone(resp['error'].get('title')) self.assertIsNotNone(resp['error'].get('message')) self.assertEqual(int(resp['error']['code']), r.status_code) def assertValidListLinks(self, links, resource_url=None): self.assertIsNotNone(links) self.assertIsNotNone(links.get('self')) self.assertThat(links['self'], matchers.StartsWith('http://localhost')) if resource_url: self.assertThat(links['self'], matchers.EndsWith(resource_url)) self.assertIn('next', 
links) if links['next'] is not None: self.assertThat(links['next'], matchers.StartsWith('http://localhost')) self.assertIn('previous', links) if links['previous'] is not None: self.assertThat(links['previous'], matchers.StartsWith('http://localhost')) def assertValidListResponse(self, resp, key, entity_validator, ref=None, expected_length=None, keys_to_check=None, resource_url=None): """Make assertions common to all API list responses. If a reference is provided, it's ID will be searched for in the response, and asserted to be equal. """ entities = resp.result.get(key) self.assertIsNotNone(entities) if expected_length is not None: self.assertEqual(expected_length, len(entities)) elif ref is not None: # we're at least expecting the ref self.assertNotEmpty(entities) # collections should have relational links self.assertValidListLinks(resp.result.get('links'), resource_url=resource_url) for entity in entities: self.assertIsNotNone(entity) self.assertValidEntity(entity, keys_to_check=keys_to_check) entity_validator(entity) if ref: entity = [x for x in entities if x['id'] == ref['id']][0] self.assertValidEntity(entity, ref=ref, keys_to_check=keys_to_check) entity_validator(entity, ref) return entities def assertValidResponse(self, resp, key, entity_validator, *args, **kwargs): """Make assertions common to all API responses.""" entity = resp.result.get(key) self.assertIsNotNone(entity) keys = kwargs.pop('keys_to_check', None) self.assertValidEntity(entity, keys_to_check=keys, *args, **kwargs) entity_validator(entity, *args, **kwargs) return entity def assertValidEntity(self, entity, ref=None, keys_to_check=None): """Make assertions common to all API entities. If a reference is provided, the entity will also be compared against the reference. 
""" if keys_to_check is not None: keys = keys_to_check else: keys = ['name', 'description', 'enabled'] for k in ['id'] + keys: msg = '%s unexpectedly None in %s' % (k, entity) self.assertIsNotNone(entity.get(k), msg) self.assertIsNotNone(entity.get('links')) self.assertIsNotNone(entity['links'].get('self')) self.assertThat(entity['links']['self'], matchers.StartsWith('http://localhost')) self.assertIn(entity['id'], entity['links']['self']) if ref: for k in keys: msg = '%s not equal: %s != %s' % (k, ref[k], entity[k]) self.assertEqual(ref[k], entity[k]) return entity # auth validation def assertValidISO8601ExtendedFormatDatetime(self, dt): try: return timeutils.parse_strtime(dt, fmt=TIME_FORMAT) except Exception: msg = '%s is not a valid ISO 8601 extended format date time.' % dt raise AssertionError(msg) def assertValidTokenResponse(self, r, user=None): self.assertTrue(r.headers.get('X-Subject-Token')) token = r.result['token'] self.assertIsNotNone(token.get('expires_at')) expires_at = self.assertValidISO8601ExtendedFormatDatetime( token['expires_at']) self.assertIsNotNone(token.get('issued_at')) issued_at = self.assertValidISO8601ExtendedFormatDatetime( token['issued_at']) self.assertTrue(issued_at < expires_at) self.assertIn('user', token) self.assertIn('id', token['user']) self.assertIn('name', token['user']) self.assertIn('domain', token['user']) self.assertIn('id', token['user']['domain']) if user is not None: self.assertEqual(user['id'], token['user']['id']) self.assertEqual(user['name'], token['user']['name']) self.assertEqual(user['domain_id'], token['user']['domain']['id']) return token def assertValidUnscopedTokenResponse(self, r, *args, **kwargs): token = self.assertValidTokenResponse(r, *args, **kwargs) validator_object = validators.SchemaValidator( self.generate_token_schema() ) validator_object.validate(token) return token def assertValidScopedTokenResponse(self, r, *args, **kwargs): require_catalog = kwargs.pop('require_catalog', True) endpoint_filter 
= kwargs.pop('endpoint_filter', False) ep_filter_assoc = kwargs.pop('ep_filter_assoc', 0) is_admin_project = kwargs.pop('is_admin_project', False) token = self.assertValidTokenResponse(r, *args, **kwargs) if require_catalog: endpoint_num = 0 self.assertIn('catalog', token) if isinstance(token['catalog'], list): # only test JSON for service in token['catalog']: for endpoint in service['endpoints']: self.assertNotIn('enabled', endpoint) self.assertNotIn('legacy_endpoint_id', endpoint) self.assertNotIn('service_id', endpoint) endpoint_num += 1 # sub test for the OS-EP-FILTER extension enabled if endpoint_filter: self.assertEqual(ep_filter_assoc, endpoint_num) else: self.assertNotIn('catalog', token) self.assertIn('roles', token) self.assertTrue(token['roles']) for role in token['roles']: self.assertIn('id', role) self.assertIn('name', role) if is_admin_project: # NOTE(samueldmq): We want to explicitly test for boolean self.assertIs(True, token['is_admin_project']) else: self.assertNotIn('is_admin_project', token) return token def assertValidProjectScopedTokenResponse(self, r, *args, **kwargs): token = self.assertValidScopedTokenResponse(r, *args, **kwargs) project_scoped_token_schema = self.generate_token_schema( project_scoped=True) if token.get('OS-TRUST:trust'): trust_properties = { 'OS-TRUST:trust': { 'type': ['object'], 'required': ['id', 'impersonation', 'trustor_user', 'trustee_user'], 'properties': { 'id': {'type': 'string'}, 'impersonation': {'type': 'boolean'}, 'trustor_user': { 'type': 'object', 'required': ['id'], 'properties': { 'id': {'type': 'string'} }, 'additionalProperties': False }, 'trustee_user': { 'type': 'object', 'required': ['id'], 'properties': { 'id': {'type': 'string'} }, 'additionalProperties': False } }, 'additionalProperties': False } } project_scoped_token_schema['properties'].update(trust_properties) validator_object = validators.SchemaValidator( project_scoped_token_schema) validator_object.validate(token) 
self.assertEqual(self.role_id, token['roles'][0]['id']) return token def assertValidDomainScopedTokenResponse(self, r, *args, **kwargs): token = self.assertValidScopedTokenResponse(r, *args, **kwargs) validator_object = validators.SchemaValidator( self.generate_token_schema(domain_scoped=True) ) validator_object.validate(token) return token def assertEqualTokens(self, a, b): """Assert that two tokens are equal. Compare two tokens except for their ids. This also truncates the time in the comparison. """ def normalize(token): del token['token']['expires_at'] del token['token']['issued_at'] return token a_expires_at = self.assertValidISO8601ExtendedFormatDatetime( a['token']['expires_at']) b_expires_at = self.assertValidISO8601ExtendedFormatDatetime( b['token']['expires_at']) self.assertCloseEnoughForGovernmentWork(a_expires_at, b_expires_at) a_issued_at = self.assertValidISO8601ExtendedFormatDatetime( a['token']['issued_at']) b_issued_at = self.assertValidISO8601ExtendedFormatDatetime( b['token']['issued_at']) self.assertCloseEnoughForGovernmentWork(a_issued_at, b_issued_at) return self.assertDictEqual(normalize(a), normalize(b)) # catalog validation def assertValidCatalogResponse(self, resp, *args, **kwargs): self.assertEqual(set(['catalog', 'links']), set(resp.json.keys())) self.assertValidCatalog(resp.json['catalog']) self.assertIn('links', resp.json) self.assertIsInstance(resp.json['links'], dict) self.assertEqual(['self'], list(resp.json['links'].keys())) self.assertEqual( 'http://localhost/v3/auth/catalog', resp.json['links']['self']) def assertValidCatalog(self, entity): self.assertIsInstance(entity, list) self.assertTrue(len(entity) > 0) for service in entity: self.assertIsNotNone(service.get('id')) self.assertIsNotNone(service.get('name')) self.assertIsNotNone(service.get('type')) self.assertNotIn('enabled', service) self.assertTrue(len(service['endpoints']) > 0) for endpoint in service['endpoints']: self.assertIsNotNone(endpoint.get('id')) 
self.assertIsNotNone(endpoint.get('interface')) self.assertIsNotNone(endpoint.get('url')) self.assertNotIn('enabled', endpoint) self.assertNotIn('legacy_endpoint_id', endpoint) self.assertNotIn('service_id', endpoint) # region validation def assertValidRegionListResponse(self, resp, *args, **kwargs): # NOTE(jaypipes): I have to pass in a blank keys_to_check parameter # below otherwise the base assertValidEntity method # tries to find a "name" and an "enabled" key in the # returned ref dicts. The issue is, I don't understand # how the service and endpoint entity assertions below # actually work (they don't raise assertions), since # AFAICT, the service and endpoint tables don't have # a "name" column either... :( return self.assertValidListResponse( resp, 'regions', self.assertValidRegion, keys_to_check=[], *args, **kwargs) def assertValidRegionResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'region', self.assertValidRegion, keys_to_check=[], *args, **kwargs) def assertValidRegion(self, entity, ref=None): self.assertIsNotNone(entity.get('description')) if ref: self.assertEqual(ref['description'], entity['description']) return entity # service validation def assertValidServiceListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'services', self.assertValidService, *args, **kwargs) def assertValidServiceResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'service', self.assertValidService, *args, **kwargs) def assertValidService(self, entity, ref=None): self.assertIsNotNone(entity.get('type')) self.assertIsInstance(entity.get('enabled'), bool) if ref: self.assertEqual(ref['type'], entity['type']) return entity # endpoint validation def assertValidEndpointListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'endpoints', self.assertValidEndpoint, *args, **kwargs) def assertValidEndpointResponse(self, resp, *args, **kwargs): return 
self.assertValidResponse( resp, 'endpoint', self.assertValidEndpoint, *args, **kwargs) def assertValidEndpoint(self, entity, ref=None): self.assertIsNotNone(entity.get('interface')) self.assertIsNotNone(entity.get('service_id')) self.assertIsInstance(entity['enabled'], bool) # this is intended to be an unexposed implementation detail self.assertNotIn('legacy_endpoint_id', entity) if ref: self.assertEqual(ref['interface'], entity['interface']) self.assertEqual(ref['service_id'], entity['service_id']) if ref.get('region') is not None: self.assertEqual(ref['region_id'], entity.get('region_id')) return entity # domain validation def assertValidDomainListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'domains', self.assertValidDomain, *args, **kwargs) def assertValidDomainResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'domain', self.assertValidDomain, *args, **kwargs) def assertValidDomain(self, entity, ref=None): if ref: pass return entity # project validation def assertValidProjectListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'projects', self.assertValidProject, *args, **kwargs) def assertValidProjectResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'project', self.assertValidProject, *args, **kwargs) def assertValidProject(self, entity, ref=None): if ref: self.assertEqual(ref['domain_id'], entity['domain_id']) return entity # user validation def assertValidUserListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'users', self.assertValidUser, keys_to_check=['name', 'enabled'], *args, **kwargs) def assertValidUserResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'user', self.assertValidUser, keys_to_check=['name', 'enabled'], *args, **kwargs) def assertValidUser(self, entity, ref=None): self.assertIsNotNone(entity.get('domain_id')) self.assertIsNotNone(entity.get('email')) 
self.assertIsNone(entity.get('password')) self.assertNotIn('tenantId', entity) if ref: self.assertEqual(ref['domain_id'], entity['domain_id']) self.assertEqual(ref['email'], entity['email']) if 'default_project_id' in ref: self.assertIsNotNone(ref['default_project_id']) self.assertEqual(ref['default_project_id'], entity['default_project_id']) return entity # group validation def assertValidGroupListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'groups', self.assertValidGroup, keys_to_check=['name', 'description', 'domain_id'], *args, **kwargs) def assertValidGroupResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'group', self.assertValidGroup, keys_to_check=['name', 'description', 'domain_id'], *args, **kwargs) def assertValidGroup(self, entity, ref=None): self.assertIsNotNone(entity.get('name')) if ref: self.assertEqual(ref['name'], entity['name']) return entity # credential validation def assertValidCredentialListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'credentials', self.assertValidCredential, keys_to_check=['blob', 'user_id', 'type'], *args, **kwargs) def assertValidCredentialResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'credential', self.assertValidCredential, keys_to_check=['blob', 'user_id', 'type'], *args, **kwargs) def assertValidCredential(self, entity, ref=None): self.assertIsNotNone(entity.get('user_id')) self.assertIsNotNone(entity.get('blob')) self.assertIsNotNone(entity.get('type')) if ref: self.assertEqual(ref['user_id'], entity['user_id']) self.assertEqual(ref['blob'], entity['blob']) self.assertEqual(ref['type'], entity['type']) self.assertEqual(ref.get('project_id'), entity.get('project_id')) return entity # role validation def assertValidRoleListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'roles', self.assertValidRole, keys_to_check=['name'], *args, **kwargs) def 
assertRoleInListResponse(self, resp, ref, expected=1): found_count = 0 for entity in resp.result.get('roles'): try: self.assertValidRole(entity, ref=ref) except Exception: # It doesn't match, so let's go onto the next one pass else: found_count += 1 self.assertEqual(expected, found_count) def assertRoleNotInListResponse(self, resp, ref): self.assertRoleInListResponse(resp, ref=ref, expected=0) def assertValidRoleResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'role', self.assertValidRole, keys_to_check=['name'], *args, **kwargs) def assertValidRole(self, entity, ref=None): self.assertIsNotNone(entity.get('name')) if ref: self.assertEqual(ref['name'], entity['name']) self.assertEqual(ref['domain_id'], entity['domain_id']) return entity # role assignment validation def assertValidRoleAssignmentListResponse(self, resp, expected_length=None, resource_url=None): entities = resp.result.get('role_assignments') if expected_length: self.assertEqual(expected_length, len(entities)) # Collections should have relational links self.assertValidListLinks(resp.result.get('links'), resource_url=resource_url) for entity in entities: self.assertIsNotNone(entity) self.assertValidRoleAssignment(entity) return entities def assertValidRoleAssignment(self, entity, ref=None): # A role should be present self.assertIsNotNone(entity.get('role')) self.assertIsNotNone(entity['role'].get('id')) # Only one of user or group should be present if entity.get('user'): self.assertIsNone(entity.get('group')) self.assertIsNotNone(entity['user'].get('id')) else: self.assertIsNotNone(entity.get('group')) self.assertIsNotNone(entity['group'].get('id')) # A scope should be present and have only one of domain or project self.assertIsNotNone(entity.get('scope')) if entity['scope'].get('project'): self.assertIsNone(entity['scope'].get('domain')) self.assertIsNotNone(entity['scope']['project'].get('id')) else: self.assertIsNotNone(entity['scope'].get('domain')) 
self.assertIsNotNone(entity['scope']['domain'].get('id')) # An assignment link should be present self.assertIsNotNone(entity.get('links')) self.assertIsNotNone(entity['links'].get('assignment')) if ref: links = ref.pop('links') try: self.assertDictContainsSubset(ref, entity) self.assertIn(links['assignment'], entity['links']['assignment']) finally: if links: ref['links'] = links def assertRoleAssignmentInListResponse(self, resp, ref, expected=1): found_count = 0 for entity in resp.result.get('role_assignments'): try: self.assertValidRoleAssignment(entity, ref=ref) except Exception: # It doesn't match, so let's go onto the next one pass else: found_count += 1 self.assertEqual(expected, found_count) def assertRoleAssignmentNotInListResponse(self, resp, ref): self.assertRoleAssignmentInListResponse(resp, ref=ref, expected=0) # policy validation def assertValidPolicyListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'policies', self.assertValidPolicy, *args, **kwargs) def assertValidPolicyResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'policy', self.assertValidPolicy, *args, **kwargs) def assertValidPolicy(self, entity, ref=None): self.assertIsNotNone(entity.get('blob')) self.assertIsNotNone(entity.get('type')) if ref: self.assertEqual(ref['blob'], entity['blob']) self.assertEqual(ref['type'], entity['type']) return entity # trust validation def assertValidTrustListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'trusts', self.assertValidTrustSummary, keys_to_check=['trustor_user_id', 'trustee_user_id', 'impersonation'], *args, **kwargs) def assertValidTrustResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'trust', self.assertValidTrust, keys_to_check=['trustor_user_id', 'trustee_user_id', 'impersonation'], *args, **kwargs) def assertValidTrustSummary(self, entity, ref=None): return self.assertValidTrust(entity, ref, summary=True) def 
assertValidTrust(self, entity, ref=None, summary=False): self.assertIsNotNone(entity.get('trustor_user_id')) self.assertIsNotNone(entity.get('trustee_user_id')) self.assertIsNotNone(entity.get('impersonation')) self.assertIn('expires_at', entity) if entity['expires_at'] is not None: self.assertValidISO8601ExtendedFormatDatetime(entity['expires_at']) if summary: # Trust list contains no roles, but getting a specific # trust by ID provides the detailed response containing roles self.assertNotIn('roles', entity) self.assertIn('project_id', entity) else: for role in entity['roles']: self.assertIsNotNone(role) self.assertValidEntity(role, keys_to_check=['name']) self.assertValidRole(role) self.assertValidListLinks(entity.get('roles_links')) # always disallow role xor project_id (neither or both is allowed) has_roles = bool(entity.get('roles')) has_project = bool(entity.get('project_id')) self.assertFalse(has_roles ^ has_project) if ref: self.assertEqual(ref['trustor_user_id'], entity['trustor_user_id']) self.assertEqual(ref['trustee_user_id'], entity['trustee_user_id']) self.assertEqual(ref['project_id'], entity['project_id']) if entity.get('expires_at') or ref.get('expires_at'): entity_exp = self.assertValidISO8601ExtendedFormatDatetime( entity['expires_at']) ref_exp = self.assertValidISO8601ExtendedFormatDatetime( ref['expires_at']) self.assertCloseEnoughForGovernmentWork(entity_exp, ref_exp) else: self.assertEqual(ref.get('expires_at'), entity.get('expires_at')) return entity # Service providers (federation) def assertValidServiceProvider(self, entity, ref=None, *args, **kwargs): attributes = frozenset(['auth_url', 'id', 'enabled', 'description', 'links', 'relay_state_prefix', 'sp_url']) for attribute in attributes: self.assertIsNotNone(entity.get(attribute)) def assertValidServiceProviderListResponse(self, resp, *args, **kwargs): if kwargs.get('keys_to_check') is None: kwargs['keys_to_check'] = ['auth_url', 'id', 'enabled', 'description', 'relay_state_prefix', 
'sp_url'] return self.assertValidListResponse( resp, 'service_providers', self.assertValidServiceProvider, *args, **kwargs) def build_external_auth_request(self, remote_user, remote_domain=None, auth_data=None, kerberos=False): context = {'environment': {'REMOTE_USER': remote_user, 'AUTH_TYPE': 'Negotiate'}} if remote_domain: context['environment']['REMOTE_DOMAIN'] = remote_domain if not auth_data: auth_data = self.build_authentication_request( kerberos=kerberos)['auth'] no_context = None auth_info = auth.controllers.AuthInfo.create(no_context, auth_data) auth_context = {'extras': {}, 'method_names': []} return context, auth_info, auth_context class VersionTestCase(RestfulTestCase): def test_get_version(self): pass # NOTE(morganfainberg): To be removed when admin_token_auth is removed. This # has been split out to allow testing admin_token auth without enabling it # for other tests. class AuthContextMiddlewareAdminTokenTestCase(RestfulTestCase): EXTENSION_TO_ADD = 'admin_token_auth' def config_overrides(self): super(AuthContextMiddlewareAdminTokenTestCase, self).config_overrides() self.config_fixture.config( admin_token='ADMIN') # NOTE(morganfainberg): This is knowingly copied from below for simplicity # during the deprecation cycle. def _middleware_request(self, token, extra_environ=None): def application(environ, start_response): body = b'body' headers = [('Content-Type', 'text/html; charset=utf8'), ('Content-Length', str(len(body)))] start_response('200 OK', headers) return [body] app = webtest.TestApp(middleware.AuthContextMiddleware(application), extra_environ=extra_environ) resp = app.get('/', headers={middleware.AUTH_TOKEN_HEADER: token}) self.assertEqual('body', resp.text) # just to make sure it worked return resp.request def test_admin_auth_context(self): # test to make sure AuthContextMiddleware does not attempt to build the # auth context if the admin_token middleware indicates it's admin # already. token_id = uuid.uuid4().hex # token doesn't matter. 
# the admin_token middleware sets is_admin in the context. extra_environ = {middleware.CONTEXT_ENV: {'is_admin': True}} req = self._middleware_request(token_id, extra_environ) auth_context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertDictEqual({}, auth_context) @mock.patch.object(middleware_auth.versionutils, 'report_deprecated_feature') def test_admin_token_auth_context_deprecated(self, mock_report_deprecated): # For backwards compatibility AuthContextMiddleware will check that the # admin token (as configured in the CONF file) is present and not # attempt to build the auth context. This is deprecated. req = self._middleware_request('ADMIN') auth_context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertDictEqual({}, auth_context) self.assertEqual(1, mock_report_deprecated.call_count) # NOTE(gyee): test AuthContextMiddleware here instead of test_middleware.py # because we need the token class AuthContextMiddlewareTestCase(RestfulTestCase): def _middleware_request(self, token, extra_environ=None): def application(environ, start_response): body = b'body' headers = [('Content-Type', 'text/html; charset=utf8'), ('Content-Length', str(len(body)))] start_response('200 OK', headers) return [body] app = webtest.TestApp(middleware.AuthContextMiddleware(application), extra_environ=extra_environ) resp = app.get('/', headers={middleware.AUTH_TOKEN_HEADER: token}) self.assertEqual(b'body', resp.body) # just to make sure it worked return resp.request def test_auth_context_build_by_middleware(self): # test to make sure AuthContextMiddleware successful build the auth # context from the incoming auth token admin_token = self.get_scoped_token() req = self._middleware_request(admin_token) self.assertEqual( self.user['id'], req.environ.get(authorization.AUTH_CONTEXT_ENV)['user_id']) def test_auth_context_override(self): overridden_context = 'OVERRIDDEN_CONTEXT' # this token should not be used token = uuid.uuid4().hex extra_environ = 
{authorization.AUTH_CONTEXT_ENV: overridden_context} req = self._middleware_request(token, extra_environ=extra_environ) # make sure overridden context take precedence self.assertEqual(overridden_context, req.environ.get(authorization.AUTH_CONTEXT_ENV)) def test_unscoped_token_auth_context(self): unscoped_token = self.get_unscoped_token() req = self._middleware_request(unscoped_token) for key in ['project_id', 'domain_id', 'domain_name']: self.assertNotIn( key, req.environ.get(authorization.AUTH_CONTEXT_ENV)) def test_project_scoped_token_auth_context(self): project_scoped_token = self.get_scoped_token() req = self._middleware_request(project_scoped_token) self.assertEqual( self.project['id'], req.environ.get(authorization.AUTH_CONTEXT_ENV)['project_id']) def test_domain_scoped_token_auth_context(self): # grant the domain role to user path = '/domains/%s/users/%s/roles/%s' % ( self.domain['id'], self.user['id'], self.role['id']) self.put(path=path) domain_scoped_token = self.get_domain_scoped_token() req = self._middleware_request(domain_scoped_token) self.assertEqual( self.domain['id'], req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_id']) self.assertEqual( self.domain['name'], req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_name']) def test_oslo_context(self): # After AuthContextMiddleware runs, an # oslo_context.context.RequestContext was created so that its fields # can be logged. This test validates that the RequestContext was # created and the fields are set as expected. # Use a scoped token so more fields can be set. token = self.get_scoped_token() # oslo_middleware RequestId middleware sets openstack.request_id. 
request_id = uuid.uuid4().hex environ = {'openstack.request_id': request_id} self._middleware_request(token, extra_environ=environ) req_context = oslo_context.context.get_current() self.assertEqual(request_id, req_context.request_id) self.assertEqual(token, req_context.auth_token) self.assertEqual(self.user['id'], req_context.user) self.assertEqual(self.project['id'], req_context.tenant) self.assertIsNone(req_context.domain) self.assertEqual(self.user['domain_id'], req_context.user_domain) self.assertEqual(self.project['domain_id'], req_context.project_domain) self.assertFalse(req_context.is_admin) class JsonHomeTestMixin(object): """JSON Home test Mixin this class to provide a test for the JSON-Home response for an extension. The base class must set JSON_HOME_DATA to a dict of relationship URLs (rels) to the JSON-Home data for the relationship. The rels and associated data must be in the response. """ def test_get_json_home(self): resp = self.get('/', convert=False, headers={'Accept': 'application/json-home'}) self.assertThat(resp.headers['Content-Type'], matchers.Equals('application/json-home')) resp_data = jsonutils.loads(resp.body) # Check that the example relationships are present. for rel in self.JSON_HOME_DATA: self.assertThat(resp_data['resources'][rel], matchers.Equals(self.JSON_HOME_DATA[rel])) class AssignmentTestMixin(object): """To hold assignment helper functions.""" def build_role_assignment_query_url(self, effective=False, **filters): """Build and return a role assignment query url with provided params. Available filters are: domain_id, project_id, user_id, group_id, role_id and inherited_to_projects. """ query_params = '?effective' if effective else '' for k, v in filters.items(): query_params += '?' if not query_params else '&' if k == 'inherited_to_projects': query_params += 'scope.OS-INHERIT:inherited_to=projects' else: if k in ['domain_id', 'project_id']: query_params += 'scope.' 
elif k not in ['user_id', 'group_id', 'role_id']: raise ValueError( 'Invalid key \'%s\' in provided filters.' % k) query_params += '%s=%s' % (k.replace('_', '.'), v) return '/role_assignments%s' % query_params def build_role_assignment_link(self, **attribs): """Build and return a role assignment link with provided attributes. Provided attributes are expected to contain: domain_id or project_id, user_id or group_id, role_id and, optionally, inherited_to_projects. """ if attribs.get('domain_id'): link = '/domains/' + attribs['domain_id'] else: link = '/projects/' + attribs['project_id'] if attribs.get('user_id'): link += '/users/' + attribs['user_id'] else: link += '/groups/' + attribs['group_id'] link += '/roles/' + attribs['role_id'] if attribs.get('inherited_to_projects'): return '/OS-INHERIT%s/inherited_to_projects' % link return link def build_role_assignment_entity( self, link=None, prior_role_link=None, **attribs): """Build and return a role assignment entity with provided attributes. Provided attributes are expected to contain: domain_id or project_id, user_id or group_id, role_id and, optionally, inherited_to_projects. 
""" entity = {'links': {'assignment': ( link or self.build_role_assignment_link(**attribs))}} if attribs.get('domain_id'): entity['scope'] = {'domain': {'id': attribs['domain_id']}} else: entity['scope'] = {'project': {'id': attribs['project_id']}} if attribs.get('user_id'): entity['user'] = {'id': attribs['user_id']} if attribs.get('group_id'): entity['links']['membership'] = ('/groups/%s/users/%s' % (attribs['group_id'], attribs['user_id'])) else: entity['group'] = {'id': attribs['group_id']} entity['role'] = {'id': attribs['role_id']} if attribs.get('inherited_to_projects'): entity['scope']['OS-INHERIT:inherited_to'] = 'projects' if prior_role_link: entity['links']['prior_role'] = prior_role_link return entity def build_role_assignment_entity_include_names(self, domain_ref=None, role_ref=None, group_ref=None, user_ref=None, project_ref=None, inherited_assignment=None): """Build and return a role assignment entity with provided attributes. The expected attributes are: domain_ref or project_ref, user_ref or group_ref, role_ref and, optionally, inherited_to_projects. 
""" entity = {'links': {}} attributes_for_links = {} if project_ref: dmn_name = self.resource_api.get_domain( project_ref['domain_id'])['name'] entity['scope'] = {'project': { 'id': project_ref['id'], 'name': project_ref['name'], 'domain': { 'id': project_ref['domain_id'], 'name': dmn_name}}} attributes_for_links['project_id'] = project_ref['id'] else: entity['scope'] = {'domain': {'id': domain_ref['id'], 'name': domain_ref['name']}} attributes_for_links['domain_id'] = domain_ref['id'] if user_ref: dmn_name = self.resource_api.get_domain( user_ref['domain_id'])['name'] entity['user'] = {'id': user_ref['id'], 'name': user_ref['name'], 'domain': {'id': user_ref['domain_id'], 'name': dmn_name}} attributes_for_links['user_id'] = user_ref['id'] else: dmn_name = self.resource_api.get_domain( group_ref['domain_id'])['name'] entity['group'] = {'id': group_ref['id'], 'name': group_ref['name'], 'domain': { 'id': group_ref['domain_id'], 'name': dmn_name}} attributes_for_links['group_id'] = group_ref['id'] if role_ref: entity['role'] = {'id': role_ref['id'], 'name': role_ref['name']} attributes_for_links['role_id'] = role_ref['id'] if inherited_assignment: entity['scope']['OS-INHERIT:inherited_to'] = 'projects' attributes_for_links['inherited_to_projects'] = True entity['links']['assignment'] = self.build_role_assignment_link( **attributes_for_links) return entity keystone-9.0.0/keystone/tests/unit/common/0000775000567000056710000000000012701407246021777 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/common/test_json_home.py0000664000567000056710000000711512701407102025364 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from testtools import matchers from keystone.common import json_home from keystone.tests import unit class JsonHomeTest(unit.BaseTestCase): def test_build_v3_resource_relation(self): resource_name = self.getUniqueString() relation = json_home.build_v3_resource_relation(resource_name) exp_relation = ( 'http://docs.openstack.org/api/openstack-identity/3/rel/%s' % resource_name) self.assertThat(relation, matchers.Equals(exp_relation)) def test_build_v3_extension_resource_relation(self): extension_name = self.getUniqueString() extension_version = self.getUniqueString() resource_name = self.getUniqueString() relation = json_home.build_v3_extension_resource_relation( extension_name, extension_version, resource_name) exp_relation = ( 'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/rel/' '%s' % (extension_name, extension_version, resource_name)) self.assertThat(relation, matchers.Equals(exp_relation)) def test_build_v3_parameter_relation(self): parameter_name = self.getUniqueString() relation = json_home.build_v3_parameter_relation(parameter_name) exp_relation = ( 'http://docs.openstack.org/api/openstack-identity/3/param/%s' % parameter_name) self.assertThat(relation, matchers.Equals(exp_relation)) def test_build_v3_extension_parameter_relation(self): extension_name = self.getUniqueString() extension_version = self.getUniqueString() parameter_name = self.getUniqueString() relation = json_home.build_v3_extension_parameter_relation( extension_name, extension_version, parameter_name) exp_relation = ( 
'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/' 'param/%s' % (extension_name, extension_version, parameter_name)) self.assertThat(relation, matchers.Equals(exp_relation)) def test_translate_urls(self): href_rel = self.getUniqueString() href = self.getUniqueString() href_template_rel = self.getUniqueString() href_template = self.getUniqueString() href_vars = {self.getUniqueString(): self.getUniqueString()} original_json_home = { 'resources': { href_rel: {'href': href}, href_template_rel: { 'href-template': href_template, 'href-vars': href_vars} } } new_json_home = copy.deepcopy(original_json_home) new_prefix = self.getUniqueString() json_home.translate_urls(new_json_home, new_prefix) exp_json_home = { 'resources': { href_rel: {'href': new_prefix + href}, href_template_rel: { 'href-template': new_prefix + href_template, 'href-vars': href_vars} } } self.assertThat(new_json_home, matchers.Equals(exp_json_home)) keystone-9.0.0/keystone/tests/unit/common/test_injection.py0000664000567000056710000001706212701407102025367 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from keystone.common import dependency from keystone.tests import unit class TestDependencyInjection(unit.BaseTestCase): def setUp(self): super(TestDependencyInjection, self).setUp() dependency.reset() self.addCleanup(dependency.reset) def test_dependency_injection(self): class Interface(object): def do_work(self): assert False @dependency.provider('first_api') class FirstImplementation(Interface): def do_work(self): return True @dependency.provider('second_api') class SecondImplementation(Interface): def do_work(self): return True @dependency.requires('first_api', 'second_api') class Consumer(object): def do_work_with_dependencies(self): assert self.first_api.do_work() assert self.second_api.do_work() # initialize dependency providers first_api = FirstImplementation() second_api = SecondImplementation() # ... sometime later, initialize a dependency consumer consumer = Consumer() # the expected dependencies should be available to the consumer self.assertIs(consumer.first_api, first_api) self.assertIs(consumer.second_api, second_api) self.assertIsInstance(consumer.first_api, Interface) self.assertIsInstance(consumer.second_api, Interface) consumer.do_work_with_dependencies() def test_dependency_provider_configuration(self): @dependency.provider('api') class Configurable(object): def __init__(self, value=None): self.value = value def get_value(self): return self.value @dependency.requires('api') class Consumer(object): def get_value(self): return self.api.get_value() # initialize dependency providers api = Configurable(value=True) # ... 
sometime later, initialize a dependency consumer consumer = Consumer() # the expected dependencies should be available to the consumer self.assertIs(consumer.api, api) self.assertIsInstance(consumer.api, Configurable) self.assertTrue(consumer.get_value()) def test_dependency_consumer_configuration(self): @dependency.provider('api') class Provider(object): def get_value(self): return True @dependency.requires('api') class Configurable(object): def __init__(self, value=None): self.value = value def get_value(self): if self.value: return self.api.get_value() # initialize dependency providers api = Provider() # ... sometime later, initialize a dependency consumer consumer = Configurable(value=True) # the expected dependencies should be available to the consumer self.assertIs(consumer.api, api) self.assertIsInstance(consumer.api, Provider) self.assertTrue(consumer.get_value()) def test_inherited_dependency(self): class Interface(object): def do_work(self): assert False @dependency.provider('first_api') class FirstImplementation(Interface): def do_work(self): return True @dependency.provider('second_api') class SecondImplementation(Interface): def do_work(self): return True @dependency.requires('first_api') class ParentConsumer(object): def do_work_with_dependencies(self): assert self.first_api.do_work() @dependency.requires('second_api') class ChildConsumer(ParentConsumer): def do_work_with_dependencies(self): assert self.second_api.do_work() super(ChildConsumer, self).do_work_with_dependencies() # initialize dependency providers first_api = FirstImplementation() second_api = SecondImplementation() # ... 
sometime later, initialize a dependency consumer consumer = ChildConsumer() # dependencies should be naturally inherited self.assertEqual( set(['first_api']), ParentConsumer._dependencies) self.assertEqual( set(['first_api', 'second_api']), ChildConsumer._dependencies) self.assertEqual( set(['first_api', 'second_api']), consumer._dependencies) # the expected dependencies should be available to the consumer self.assertIs(consumer.first_api, first_api) self.assertIs(consumer.second_api, second_api) self.assertIsInstance(consumer.first_api, Interface) self.assertIsInstance(consumer.second_api, Interface) consumer.do_work_with_dependencies() def test_unresolvable_dependency(self): @dependency.requires(uuid.uuid4().hex) class Consumer(object): pass def for_test(): Consumer() dependency.resolve_future_dependencies() self.assertRaises(dependency.UnresolvableDependencyException, for_test) def test_circular_dependency(self): p1_name = uuid.uuid4().hex p2_name = uuid.uuid4().hex @dependency.provider(p1_name) @dependency.requires(p2_name) class P1(object): pass @dependency.provider(p2_name) @dependency.requires(p1_name) class P2(object): pass p1 = P1() p2 = P2() dependency.resolve_future_dependencies() self.assertIs(getattr(p1, p2_name), p2) self.assertIs(getattr(p2, p1_name), p1) def test_reset(self): # Can reset the registry of providers. p_id = uuid.uuid4().hex @dependency.provider(p_id) class P(object): pass p_inst = P() self.assertIs(dependency.get_provider(p_id), p_inst) dependency.reset() self.assertFalse(dependency._REGISTRY) def test_get_provider(self): # Can get the instance of a provider using get_provider provider_name = uuid.uuid4().hex @dependency.provider(provider_name) class P(object): pass provider_instance = P() retrieved_provider_instance = dependency.get_provider(provider_name) self.assertIs(provider_instance, retrieved_provider_instance) def test_get_provider_not_provided_error(self): # If no provider and provider is required then fails. 
provider_name = uuid.uuid4().hex self.assertRaises(KeyError, dependency.get_provider, provider_name) def test_get_provider_not_provided_optional(self): # If no provider and provider is optional then returns None. provider_name = uuid.uuid4().hex self.assertIsNone(dependency.get_provider(provider_name, dependency.GET_OPTIONAL)) keystone-9.0.0/keystone/tests/unit/common/__init__.py0000664000567000056710000000000012701407102024065 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/common/test_sql_core.py0000664000567000056710000000366312701407102025216 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.ext import declarative from keystone.common import sql from keystone.tests import unit from keystone.tests.unit import utils ModelBase = declarative.declarative_base() class TestModel(ModelBase, sql.ModelDictMixin): __tablename__ = 'testmodel' id = sql.Column(sql.String(64), primary_key=True) text = sql.Column(sql.String(64), nullable=False) class TestModelDictMixin(unit.BaseTestCase): def test_creating_a_model_instance_from_a_dict(self): d = {'id': utils.new_uuid(), 'text': utils.new_uuid()} m = TestModel.from_dict(d) self.assertEqual(d['id'], m.id) self.assertEqual(d['text'], m.text) def test_creating_a_dict_from_a_model_instance(self): m = TestModel(id=utils.new_uuid(), text=utils.new_uuid()) d = m.to_dict() self.assertEqual(d['id'], m.id) self.assertEqual(d['text'], m.text) def test_creating_a_model_instance_from_an_invalid_dict(self): d = {'id': utils.new_uuid(), 'text': utils.new_uuid(), 'extra': None} self.assertRaises(TypeError, TestModel.from_dict, d) def test_creating_a_dict_from_a_model_instance_that_has_extra_attrs(self): expected = {'id': utils.new_uuid(), 'text': utils.new_uuid()} m = TestModel(id=expected['id'], text=expected['text']) m.extra = 'this should not be in the dictionary' self.assertEqual(expected, m.to_dict()) keystone-9.0.0/keystone/tests/unit/common/test_utils.py0000664000567000056710000002076712701407102024553 0ustar jenkinsjenkins00000000000000# encoding: utf-8 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import uuid from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_serialization import jsonutils import six from keystone.common import utils as common_utils from keystone import exception from keystone.tests import unit from keystone.tests.unit import utils from keystone.version import service CONF = cfg.CONF TZ = utils.TZ class UtilsTestCase(unit.BaseTestCase): OPTIONAL = object() def setUp(self): super(UtilsTestCase, self).setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) def test_resource_uuid(self): uuid_str = '536e28c2017e405e89b25a1ed777b952' self.assertEqual(uuid_str, common_utils.resource_uuid(uuid_str)) # Exact 64 length string. uuid_str = ('536e28c2017e405e89b25a1ed777b952' 'f13de678ac714bb1b7d1e9a007c10db5') resource_id_namespace = common_utils.RESOURCE_ID_NAMESPACE transformed_id = uuid.uuid5(resource_id_namespace, uuid_str).hex self.assertEqual(transformed_id, common_utils.resource_uuid(uuid_str)) # Non-ASCII character test. non_ascii_ = 'ß' * 32 transformed_id = uuid.uuid5(resource_id_namespace, non_ascii_).hex self.assertEqual(transformed_id, common_utils.resource_uuid(non_ascii_)) # This input is invalid because it's length is more than 64. invalid_input = 'x' * 65 self.assertRaises(ValueError, common_utils.resource_uuid, invalid_input) # 64 length unicode string, to mimic what is returned from mapping_id # backend. 
uuid_str = six.text_type('536e28c2017e405e89b25a1ed777b952' 'f13de678ac714bb1b7d1e9a007c10db5') resource_id_namespace = common_utils.RESOURCE_ID_NAMESPACE if six.PY2: uuid_str = uuid_str.encode('utf-8') transformed_id = uuid.uuid5(resource_id_namespace, uuid_str).hex self.assertEqual(transformed_id, common_utils.resource_uuid(uuid_str)) def test_hash(self): password = 'right' wrong = 'wrongwrong' # Two wrongs don't make a right hashed = common_utils.hash_password(password) self.assertTrue(common_utils.check_password(password, hashed)) self.assertFalse(common_utils.check_password(wrong, hashed)) def test_verify_normal_password_strict(self): self.config_fixture.config(strict_password_check=False) password = uuid.uuid4().hex verified = common_utils.verify_length_and_trunc_password(password) self.assertEqual(password, verified) def test_that_a_hash_can_not_be_validated_against_a_hash(self): # NOTE(dstanek): Bug 1279849 reported a problem where passwords # were not being hashed if they already looked like a hash. This # would allow someone to hash their password ahead of time # (potentially getting around password requirements, like # length) and then they could auth with their original password. 
password = uuid.uuid4().hex hashed_password = common_utils.hash_password(password) new_hashed_password = common_utils.hash_password(hashed_password) self.assertFalse(common_utils.check_password(password, new_hashed_password)) def test_verify_long_password_strict(self): self.config_fixture.config(strict_password_check=False) self.config_fixture.config(group='identity', max_password_length=5) max_length = CONF.identity.max_password_length invalid_password = 'passw0rd' trunc = common_utils.verify_length_and_trunc_password(invalid_password) self.assertEqual(invalid_password[:max_length], trunc) def test_verify_long_password_strict_raises_exception(self): self.config_fixture.config(strict_password_check=True) self.config_fixture.config(group='identity', max_password_length=5) invalid_password = 'passw0rd' self.assertRaises(exception.PasswordVerificationError, common_utils.verify_length_and_trunc_password, invalid_password) def test_hash_long_password_truncation(self): self.config_fixture.config(strict_password_check=False) invalid_length_password = '0' * 9999999 hashed = common_utils.hash_password(invalid_length_password) self.assertTrue(common_utils.check_password(invalid_length_password, hashed)) def test_hash_long_password_strict(self): self.config_fixture.config(strict_password_check=True) invalid_length_password = '0' * 9999999 self.assertRaises(exception.PasswordVerificationError, common_utils.hash_password, invalid_length_password) def _create_test_user(self, password=OPTIONAL): user = {"name": "hthtest"} if password is not self.OPTIONAL: user['password'] = password return user def test_hash_user_password_without_password(self): user = self._create_test_user() hashed = common_utils.hash_user_password(user) self.assertEqual(user, hashed) def test_hash_user_password_with_null_password(self): user = self._create_test_user(password=None) hashed = common_utils.hash_user_password(user) self.assertEqual(user, hashed) def 
test_hash_user_password_with_empty_password(self): password = '' user = self._create_test_user(password=password) user_hashed = common_utils.hash_user_password(user) password_hashed = user_hashed['password'] self.assertTrue(common_utils.check_password(password, password_hashed)) def test_hash_edge_cases(self): hashed = common_utils.hash_password('secret') self.assertFalse(common_utils.check_password('', hashed)) self.assertFalse(common_utils.check_password(None, hashed)) def test_hash_unicode(self): password = u'Comment \xe7a va' wrong = 'Comment ?a va' hashed = common_utils.hash_password(password) self.assertTrue(common_utils.check_password(password, hashed)) self.assertFalse(common_utils.check_password(wrong, hashed)) def test_auth_str_equal(self): self.assertTrue(common_utils.auth_str_equal('abc123', 'abc123')) self.assertFalse(common_utils.auth_str_equal('a', 'aaaaa')) self.assertFalse(common_utils.auth_str_equal('aaaaa', 'a')) self.assertFalse(common_utils.auth_str_equal('ABC123', 'abc123')) def test_unixtime(self): global TZ @utils.timezone def _test_unixtime(): epoch = common_utils.unixtime(dt) self.assertEqual(epoch, epoch_ans, "TZ=%s" % TZ) dt = datetime.datetime(1970, 1, 2, 3, 4, 56, 0) epoch_ans = 56 + 4 * 60 + 3 * 3600 + 86400 for d in ['+0', '-11', '-8', '-5', '+5', '+8', '+14']: TZ = 'UTC' + d _test_unixtime() def test_pki_encoder(self): data = {'field': 'value'} json = jsonutils.dumps(data, cls=common_utils.PKIEncoder) expected_json = '{"field":"value"}' self.assertEqual(expected_json, json) def test_url_safe_check(self): base_str = 'i am safe' self.assertFalse(common_utils.is_not_url_safe(base_str)) for i in common_utils.URL_RESERVED_CHARS: self.assertTrue(common_utils.is_not_url_safe(base_str + i)) def test_url_safe_with_unicode_check(self): base_str = u'i am \xe7afe' self.assertFalse(common_utils.is_not_url_safe(base_str)) for i in common_utils.URL_RESERVED_CHARS: self.assertTrue(common_utils.is_not_url_safe(base_str + i)) class 
ServiceHelperTests(unit.BaseTestCase): @service.fail_gracefully def _do_test(self): raise Exception("Test Exc") def test_fail_gracefully(self): self.assertRaises(unit.UnexpectedExit, self._do_test) keystone-9.0.0/keystone/tests/unit/common/test_manager.py0000664000567000056710000000277312701407102025022 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from keystone import catalog from keystone.common import manager from keystone.tests import unit class TestCreateLegacyDriver(unit.BaseTestCase): @mock.patch('oslo_log.versionutils.report_deprecated_feature') def test_class_is_properly_deprecated(self, mock_reporter): Driver = manager.create_legacy_driver(catalog.CatalogDriverV8) # NOTE(dstanek): I want to subvert the requirement for this # class to implement all of the abstract methods. Driver.__abstractmethods__ = set() impl = Driver() details = { 'as_of': 'Liberty', 'what': 'keystone.catalog.core.Driver', 'in_favor_of': 'keystone.catalog.core.CatalogDriverV8', 'remove_in': mock.ANY, } mock_reporter.assert_called_with(mock.ANY, mock.ANY, details) self.assertEqual('N', mock_reporter.call_args[0][2]['remove_in'][0]) self.assertIsInstance(impl, catalog.CatalogDriverV8) keystone-9.0.0/keystone/tests/unit/common/test_authorization.py0000664000567000056710000001566312701407102026312 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid from keystone.common import authorization from keystone import exception from keystone.federation import constants as federation_constants from keystone.models import token_model from keystone.tests import unit from keystone.tests.unit import test_token_provider class TestTokenToAuthContext(unit.BaseTestCase): def test_token_is_project_scoped_with_trust(self): # Check auth_context result when the token is project-scoped and has # trust info. # SAMPLE_V3_TOKEN has OS-TRUST:trust in it. 
token_data = test_token_provider.SAMPLE_V3_TOKEN token = token_model.KeystoneToken(token_id=uuid.uuid4().hex, token_data=token_data) auth_context = authorization.token_to_auth_context(token) self.assertEqual(token, auth_context['token']) self.assertTrue(auth_context['is_delegated_auth']) self.assertEqual(token_data['token']['user']['id'], auth_context['user_id']) self.assertEqual(token_data['token']['user']['domain']['id'], auth_context['user_domain_id']) self.assertEqual(token_data['token']['project']['id'], auth_context['project_id']) self.assertEqual(token_data['token']['project']['domain']['id'], auth_context['project_domain_id']) self.assertNotIn('domain_id', auth_context) self.assertNotIn('domain_name', auth_context) self.assertEqual(token_data['token']['OS-TRUST:trust']['id'], auth_context['trust_id']) self.assertEqual( token_data['token']['OS-TRUST:trust']['trustor_user_id'], auth_context['trustor_id']) self.assertEqual( token_data['token']['OS-TRUST:trust']['trustee_user_id'], auth_context['trustee_id']) self.assertItemsEqual( [r['name'] for r in token_data['token']['roles']], auth_context['roles']) self.assertIsNone(auth_context['consumer_id']) self.assertIsNone(auth_context['access_token_id']) self.assertNotIn('group_ids', auth_context) def test_token_is_domain_scoped(self): # Check contents of auth_context when token is domain-scoped. 
token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN) del token_data['token']['project'] domain_id = uuid.uuid4().hex domain_name = uuid.uuid4().hex token_data['token']['domain'] = {'id': domain_id, 'name': domain_name} token = token_model.KeystoneToken(token_id=uuid.uuid4().hex, token_data=token_data) auth_context = authorization.token_to_auth_context(token) self.assertNotIn('project_id', auth_context) self.assertNotIn('project_domain_id', auth_context) self.assertEqual(domain_id, auth_context['domain_id']) self.assertEqual(domain_name, auth_context['domain_name']) def test_token_is_unscoped(self): # Check contents of auth_context when the token is unscoped. token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN) del token_data['token']['project'] token = token_model.KeystoneToken(token_id=uuid.uuid4().hex, token_data=token_data) auth_context = authorization.token_to_auth_context(token) self.assertNotIn('project_id', auth_context) self.assertNotIn('project_domain_id', auth_context) self.assertNotIn('domain_id', auth_context) self.assertNotIn('domain_name', auth_context) def test_token_is_for_federated_user(self): # When the token is for a federated user then group_ids is in # auth_context. 
token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN) group_ids = [uuid.uuid4().hex for x in range(1, 5)] federation_data = {'identity_provider': {'id': uuid.uuid4().hex}, 'protocol': {'id': 'saml2'}, 'groups': [{'id': gid} for gid in group_ids]} token_data['token']['user'][federation_constants.FEDERATION] = ( federation_data) token = token_model.KeystoneToken(token_id=uuid.uuid4().hex, token_data=token_data) auth_context = authorization.token_to_auth_context(token) self.assertItemsEqual(group_ids, auth_context['group_ids']) def test_oauth_variables_set_for_oauth_token(self): token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN) access_token_id = uuid.uuid4().hex consumer_id = uuid.uuid4().hex token_data['token']['OS-OAUTH1'] = {'access_token_id': access_token_id, 'consumer_id': consumer_id} token = token_model.KeystoneToken(token_id=uuid.uuid4().hex, token_data=token_data) auth_context = authorization.token_to_auth_context(token) self.assertEqual(access_token_id, auth_context['access_token_id']) self.assertEqual(consumer_id, auth_context['consumer_id']) def test_oauth_variables_not_set(self): token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN) token = token_model.KeystoneToken(token_id=uuid.uuid4().hex, token_data=token_data) auth_context = authorization.token_to_auth_context(token) self.assertIsNone(auth_context['access_token_id']) self.assertIsNone(auth_context['consumer_id']) def test_token_is_not_KeystoneToken_raises_exception(self): # If the token isn't a KeystoneToken then an UnexpectedError exception # is raised. self.assertRaises(exception.UnexpectedError, authorization.token_to_auth_context, {}) def test_user_id_missing_in_token_raises_exception(self): # If there's no user ID in the token then an Unauthorized # exception is raised. 
token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN) del token_data['token']['user']['id'] token = token_model.KeystoneToken(token_id=uuid.uuid4().hex, token_data=token_data) self.assertRaises(exception.Unauthorized, authorization.token_to_auth_context, token) keystone-9.0.0/keystone/tests/unit/common/test_notifications.py0000664000567000056710000015255212701407105026265 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import uuid import mock from oslo_config import cfg from oslo_config import fixture as config_fixture from oslotest import mockpatch from pycadf import cadftaxonomy from pycadf import cadftype from pycadf import eventfactory from pycadf import resource as cadfresource from keystone import notifications from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = cfg.CONF EXP_RESOURCE_TYPE = uuid.uuid4().hex CREATED_OPERATION = notifications.ACTIONS.created UPDATED_OPERATION = notifications.ACTIONS.updated DELETED_OPERATION = notifications.ACTIONS.deleted DISABLED_OPERATION = notifications.ACTIONS.disabled class ArbitraryException(Exception): pass def register_callback(operation, resource_type=EXP_RESOURCE_TYPE): """Helper for creating and registering a mock callback.""" callback = mock.Mock(__name__='callback', im_class=mock.Mock(__name__='class')) notifications.register_event_callback(operation, resource_type, callback) return callback class 
AuditNotificationsTestCase(unit.BaseTestCase): def setUp(self): super(AuditNotificationsTestCase, self).setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.addCleanup(notifications.clear_subscribers) def _test_notification_operation(self, notify_function, operation): exp_resource_id = uuid.uuid4().hex callback = register_callback(operation) notify_function(EXP_RESOURCE_TYPE, exp_resource_id) callback.assert_called_once_with('identity', EXP_RESOURCE_TYPE, operation, {'resource_info': exp_resource_id}) self.config_fixture.config(notification_format='cadf') with mock.patch( 'keystone.notifications._create_cadf_payload') as cadf_notify: notify_function(EXP_RESOURCE_TYPE, exp_resource_id) initiator = None cadf_notify.assert_called_once_with( operation, EXP_RESOURCE_TYPE, exp_resource_id, notifications.taxonomy.OUTCOME_SUCCESS, initiator) notify_function(EXP_RESOURCE_TYPE, exp_resource_id, public=False) cadf_notify.assert_called_once_with( operation, EXP_RESOURCE_TYPE, exp_resource_id, notifications.taxonomy.OUTCOME_SUCCESS, initiator) def test_resource_created_notification(self): self._test_notification_operation(notifications.Audit.created, CREATED_OPERATION) def test_resource_updated_notification(self): self._test_notification_operation(notifications.Audit.updated, UPDATED_OPERATION) def test_resource_deleted_notification(self): self._test_notification_operation(notifications.Audit.deleted, DELETED_OPERATION) def test_resource_disabled_notification(self): self._test_notification_operation(notifications.Audit.disabled, DISABLED_OPERATION) class NotificationsTestCase(unit.BaseTestCase): def test_send_notification(self): """Test _send_notification. Test the private method _send_notification to ensure event_type, payload, and context are built and passed properly. 
""" resource = uuid.uuid4().hex resource_type = EXP_RESOURCE_TYPE operation = CREATED_OPERATION # NOTE(ldbragst): Even though notifications._send_notification doesn't # contain logic that creates cases, this is supposed to test that # context is always empty and that we ensure the resource ID of the # resource in the notification is contained in the payload. It was # agreed that context should be empty in Keystone's case, which is # also noted in the /keystone/notifications.py module. This test # ensures and maintains these conditions. expected_args = [ {}, # empty context 'identity.%s.created' % resource_type, # event_type {'resource_info': resource}, # payload 'INFO', # priority is always INFO... ] with mock.patch.object(notifications._get_notifier(), '_notify') as mocked: notifications._send_notification(operation, resource_type, resource) mocked.assert_called_once_with(*expected_args) def test_send_notification_with_opt_out(self): """Test the private method _send_notification with opt-out. Test that _send_notification does not notify when a valid notification_opt_out configuration is provided. """ resource = uuid.uuid4().hex resource_type = EXP_RESOURCE_TYPE operation = CREATED_OPERATION event_type = 'identity.%s.created' % resource_type # NOTE(diazjf): Here we add notification_opt_out to the # configuration so that we should return before _get_notifer is # called. This is because we are opting out notifications for the # passed resource_type and operation. conf = self.useFixture(config_fixture.Config(CONF)) conf.config(notification_opt_out=event_type) with mock.patch.object(notifications._get_notifier(), '_notify') as mocked: notifications._send_notification(operation, resource_type, resource) mocked.assert_not_called() def test_send_audit_notification_with_opt_out(self): """Test the private method _send_audit_notification with opt-out. Test that _send_audit_notification does not notify when a valid notification_opt_out configuration is provided. 
""" resource_type = EXP_RESOURCE_TYPE action = CREATED_OPERATION + '.' + resource_type initiator = mock target = mock outcome = 'success' event_type = 'identity.%s.created' % resource_type conf = self.useFixture(config_fixture.Config(CONF)) conf.config(notification_opt_out=event_type) with mock.patch.object(notifications._get_notifier(), '_notify') as mocked: notifications._send_audit_notification(action, initiator, outcome, target, event_type) mocked.assert_not_called() def test_opt_out_authenticate_event(self): """Test that authenticate events are successfully opted out.""" resource_type = EXP_RESOURCE_TYPE action = CREATED_OPERATION + '.' + resource_type initiator = mock target = mock outcome = 'success' event_type = 'identity.authenticate' meter_name = '%s.%s' % (event_type, outcome) conf = self.useFixture(config_fixture.Config(CONF)) conf.config(notification_opt_out=meter_name) with mock.patch.object(notifications._get_notifier(), '_notify') as mocked: notifications._send_audit_notification(action, initiator, outcome, target, event_type) mocked.assert_not_called() class BaseNotificationTest(test_v3.RestfulTestCase): def setUp(self): super(BaseNotificationTest, self).setUp() self._notifications = [] self._audits = [] def fake_notify(operation, resource_type, resource_id, actor_dict=None, public=True): note = { 'resource_id': resource_id, 'operation': operation, 'resource_type': resource_type, 'send_notification_called': True, 'public': public} if actor_dict: note['actor_id'] = actor_dict.get('id') note['actor_type'] = actor_dict.get('type') note['actor_operation'] = actor_dict.get('actor_operation') self._notifications.append(note) self.useFixture(mockpatch.PatchObject( notifications, '_send_notification', fake_notify)) def fake_audit(action, initiator, outcome, target, event_type, **kwargs): service_security = cadftaxonomy.SERVICE_SECURITY event = eventfactory.EventFactory().new_event( eventType=cadftype.EVENTTYPE_ACTIVITY, outcome=outcome, action=action, 
initiator=initiator, target=target, observer=cadfresource.Resource(typeURI=service_security)) for key, value in kwargs.items(): setattr(event, key, value) audit = { 'payload': event.as_dict(), 'event_type': event_type, 'send_notification_called': True} self._audits.append(audit) self.useFixture(mockpatch.PatchObject( notifications, '_send_audit_notification', fake_audit)) def _assert_last_note(self, resource_id, operation, resource_type, actor_id=None, actor_type=None, actor_operation=None): # NOTE(stevemar): If 'basic' format is not used, then simply # return since this assertion is not valid. if CONF.notification_format != 'basic': return self.assertTrue(len(self._notifications) > 0) note = self._notifications[-1] self.assertEqual(operation, note['operation']) self.assertEqual(resource_id, note['resource_id']) self.assertEqual(resource_type, note['resource_type']) self.assertTrue(note['send_notification_called']) if actor_id: self.assertEqual(actor_id, note['actor_id']) self.assertEqual(actor_type, note['actor_type']) self.assertEqual(actor_operation, note['actor_operation']) def _assert_last_audit(self, resource_id, operation, resource_type, target_uri): # NOTE(stevemar): If 'cadf' format is not used, then simply # return since this assertion is not valid. 
if CONF.notification_format != 'cadf': return self.assertTrue(len(self._audits) > 0) audit = self._audits[-1] payload = audit['payload'] self.assertEqual(resource_id, payload['resource_info']) action = '%s.%s' % (operation, resource_type) self.assertEqual(action, payload['action']) self.assertEqual(target_uri, payload['target']['typeURI']) self.assertEqual(resource_id, payload['target']['id']) event_type = '%s.%s.%s' % ('identity', resource_type, operation) self.assertEqual(event_type, audit['event_type']) self.assertTrue(audit['send_notification_called']) def _assert_initiator_data_is_set(self, operation, resource_type, typeURI): self.assertTrue(len(self._audits) > 0) audit = self._audits[-1] payload = audit['payload'] self.assertEqual(self.user_id, payload['initiator']['id']) self.assertEqual(self.project_id, payload['initiator']['project_id']) self.assertEqual(typeURI, payload['target']['typeURI']) action = '%s.%s' % (operation, resource_type) self.assertEqual(action, payload['action']) def _assert_notify_not_sent(self, resource_id, operation, resource_type, public=True): unexpected = { 'resource_id': resource_id, 'operation': operation, 'resource_type': resource_type, 'send_notification_called': True, 'public': public} for note in self._notifications: self.assertNotEqual(unexpected, note) def _assert_notify_sent(self, resource_id, operation, resource_type, public=True): expected = { 'resource_id': resource_id, 'operation': operation, 'resource_type': resource_type, 'send_notification_called': True, 'public': public} for note in self._notifications: if expected == note: break else: self.fail("Notification not sent.") class NotificationsForEntities(BaseNotificationTest): def test_create_group(self): group_ref = unit.new_group_ref(domain_id=self.domain_id) group_ref = self.identity_api.create_group(group_ref) self._assert_last_note(group_ref['id'], CREATED_OPERATION, 'group') self._assert_last_audit(group_ref['id'], CREATED_OPERATION, 'group', 
cadftaxonomy.SECURITY_GROUP) def test_create_project(self): project_ref = unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(project_ref['id'], project_ref) self._assert_last_note( project_ref['id'], CREATED_OPERATION, 'project') self._assert_last_audit(project_ref['id'], CREATED_OPERATION, 'project', cadftaxonomy.SECURITY_PROJECT) def test_create_role(self): role_ref = unit.new_role_ref() self.role_api.create_role(role_ref['id'], role_ref) self._assert_last_note(role_ref['id'], CREATED_OPERATION, 'role') self._assert_last_audit(role_ref['id'], CREATED_OPERATION, 'role', cadftaxonomy.SECURITY_ROLE) def test_create_user(self): user_ref = unit.new_user_ref(domain_id=self.domain_id) user_ref = self.identity_api.create_user(user_ref) self._assert_last_note(user_ref['id'], CREATED_OPERATION, 'user') self._assert_last_audit(user_ref['id'], CREATED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER) def test_create_trust(self): trustor = unit.new_user_ref(domain_id=self.domain_id) trustor = self.identity_api.create_user(trustor) trustee = unit.new_user_ref(domain_id=self.domain_id) trustee = self.identity_api.create_user(trustee) role_ref = unit.new_role_ref() self.role_api.create_role(role_ref['id'], role_ref) trust_ref = unit.new_trust_ref(trustor['id'], trustee['id']) self.trust_api.create_trust(trust_ref['id'], trust_ref, [role_ref]) self._assert_last_note( trust_ref['id'], CREATED_OPERATION, 'OS-TRUST:trust') self._assert_last_audit(trust_ref['id'], CREATED_OPERATION, 'OS-TRUST:trust', cadftaxonomy.SECURITY_TRUST) def test_delete_group(self): group_ref = unit.new_group_ref(domain_id=self.domain_id) group_ref = self.identity_api.create_group(group_ref) self.identity_api.delete_group(group_ref['id']) self._assert_last_note(group_ref['id'], DELETED_OPERATION, 'group') self._assert_last_audit(group_ref['id'], DELETED_OPERATION, 'group', cadftaxonomy.SECURITY_GROUP) def test_delete_project(self): project_ref = 
unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(project_ref['id'], project_ref) self.resource_api.delete_project(project_ref['id']) self._assert_last_note( project_ref['id'], DELETED_OPERATION, 'project') self._assert_last_audit(project_ref['id'], DELETED_OPERATION, 'project', cadftaxonomy.SECURITY_PROJECT) def test_delete_role(self): role_ref = unit.new_role_ref() self.role_api.create_role(role_ref['id'], role_ref) self.role_api.delete_role(role_ref['id']) self._assert_last_note(role_ref['id'], DELETED_OPERATION, 'role') self._assert_last_audit(role_ref['id'], DELETED_OPERATION, 'role', cadftaxonomy.SECURITY_ROLE) def test_delete_user(self): user_ref = unit.new_user_ref(domain_id=self.domain_id) user_ref = self.identity_api.create_user(user_ref) self.identity_api.delete_user(user_ref['id']) self._assert_last_note(user_ref['id'], DELETED_OPERATION, 'user') self._assert_last_audit(user_ref['id'], DELETED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER) def test_create_domain(self): domain_ref = unit.new_domain_ref() self.resource_api.create_domain(domain_ref['id'], domain_ref) self._assert_last_note(domain_ref['id'], CREATED_OPERATION, 'domain') self._assert_last_audit(domain_ref['id'], CREATED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN) def test_update_domain(self): domain_ref = unit.new_domain_ref() self.resource_api.create_domain(domain_ref['id'], domain_ref) domain_ref['description'] = uuid.uuid4().hex self.resource_api.update_domain(domain_ref['id'], domain_ref) self._assert_last_note(domain_ref['id'], UPDATED_OPERATION, 'domain') self._assert_last_audit(domain_ref['id'], UPDATED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN) def test_delete_domain(self): domain_ref = unit.new_domain_ref() self.resource_api.create_domain(domain_ref['id'], domain_ref) domain_ref['enabled'] = False self.resource_api.update_domain(domain_ref['id'], domain_ref) self.resource_api.delete_domain(domain_ref['id']) 
self._assert_last_note(domain_ref['id'], DELETED_OPERATION, 'domain') self._assert_last_audit(domain_ref['id'], DELETED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN) def test_delete_trust(self): trustor = unit.new_user_ref(domain_id=self.domain_id) trustor = self.identity_api.create_user(trustor) trustee = unit.new_user_ref(domain_id=self.domain_id) trustee = self.identity_api.create_user(trustee) role_ref = unit.new_role_ref() trust_ref = unit.new_trust_ref(trustor['id'], trustee['id']) self.trust_api.create_trust(trust_ref['id'], trust_ref, [role_ref]) self.trust_api.delete_trust(trust_ref['id']) self._assert_last_note( trust_ref['id'], DELETED_OPERATION, 'OS-TRUST:trust') self._assert_last_audit(trust_ref['id'], DELETED_OPERATION, 'OS-TRUST:trust', cadftaxonomy.SECURITY_TRUST) def test_create_endpoint(self): endpoint_ref = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id) self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref) self._assert_notify_sent(endpoint_ref['id'], CREATED_OPERATION, 'endpoint') self._assert_last_audit(endpoint_ref['id'], CREATED_OPERATION, 'endpoint', cadftaxonomy.SECURITY_ENDPOINT) def test_update_endpoint(self): endpoint_ref = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id) self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref) self.catalog_api.update_endpoint(endpoint_ref['id'], endpoint_ref) self._assert_notify_sent(endpoint_ref['id'], UPDATED_OPERATION, 'endpoint') self._assert_last_audit(endpoint_ref['id'], UPDATED_OPERATION, 'endpoint', cadftaxonomy.SECURITY_ENDPOINT) def test_delete_endpoint(self): endpoint_ref = unit.new_endpoint_ref(service_id=self.service_id, interface='public', region_id=self.region_id) self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref) self.catalog_api.delete_endpoint(endpoint_ref['id']) self._assert_notify_sent(endpoint_ref['id'], DELETED_OPERATION, 'endpoint') 
self._assert_last_audit(endpoint_ref['id'], DELETED_OPERATION, 'endpoint', cadftaxonomy.SECURITY_ENDPOINT) def test_create_service(self): service_ref = unit.new_service_ref() self.catalog_api.create_service(service_ref['id'], service_ref) self._assert_notify_sent(service_ref['id'], CREATED_OPERATION, 'service') self._assert_last_audit(service_ref['id'], CREATED_OPERATION, 'service', cadftaxonomy.SECURITY_SERVICE) def test_update_service(self): service_ref = unit.new_service_ref() self.catalog_api.create_service(service_ref['id'], service_ref) self.catalog_api.update_service(service_ref['id'], service_ref) self._assert_notify_sent(service_ref['id'], UPDATED_OPERATION, 'service') self._assert_last_audit(service_ref['id'], UPDATED_OPERATION, 'service', cadftaxonomy.SECURITY_SERVICE) def test_delete_service(self): service_ref = unit.new_service_ref() self.catalog_api.create_service(service_ref['id'], service_ref) self.catalog_api.delete_service(service_ref['id']) self._assert_notify_sent(service_ref['id'], DELETED_OPERATION, 'service') self._assert_last_audit(service_ref['id'], DELETED_OPERATION, 'service', cadftaxonomy.SECURITY_SERVICE) def test_create_region(self): region_ref = unit.new_region_ref() self.catalog_api.create_region(region_ref) self._assert_notify_sent(region_ref['id'], CREATED_OPERATION, 'region') self._assert_last_audit(region_ref['id'], CREATED_OPERATION, 'region', cadftaxonomy.SECURITY_REGION) def test_update_region(self): region_ref = unit.new_region_ref() self.catalog_api.create_region(region_ref) self.catalog_api.update_region(region_ref['id'], region_ref) self._assert_notify_sent(region_ref['id'], UPDATED_OPERATION, 'region') self._assert_last_audit(region_ref['id'], UPDATED_OPERATION, 'region', cadftaxonomy.SECURITY_REGION) def test_delete_region(self): region_ref = unit.new_region_ref() self.catalog_api.create_region(region_ref) self.catalog_api.delete_region(region_ref['id']) self._assert_notify_sent(region_ref['id'], DELETED_OPERATION, 
'region') self._assert_last_audit(region_ref['id'], DELETED_OPERATION, 'region', cadftaxonomy.SECURITY_REGION) def test_create_policy(self): policy_ref = unit.new_policy_ref() self.policy_api.create_policy(policy_ref['id'], policy_ref) self._assert_notify_sent(policy_ref['id'], CREATED_OPERATION, 'policy') self._assert_last_audit(policy_ref['id'], CREATED_OPERATION, 'policy', cadftaxonomy.SECURITY_POLICY) def test_update_policy(self): policy_ref = unit.new_policy_ref() self.policy_api.create_policy(policy_ref['id'], policy_ref) self.policy_api.update_policy(policy_ref['id'], policy_ref) self._assert_notify_sent(policy_ref['id'], UPDATED_OPERATION, 'policy') self._assert_last_audit(policy_ref['id'], UPDATED_OPERATION, 'policy', cadftaxonomy.SECURITY_POLICY) def test_delete_policy(self): policy_ref = unit.new_policy_ref() self.policy_api.create_policy(policy_ref['id'], policy_ref) self.policy_api.delete_policy(policy_ref['id']) self._assert_notify_sent(policy_ref['id'], DELETED_OPERATION, 'policy') self._assert_last_audit(policy_ref['id'], DELETED_OPERATION, 'policy', cadftaxonomy.SECURITY_POLICY) def test_disable_domain(self): domain_ref = unit.new_domain_ref() self.resource_api.create_domain(domain_ref['id'], domain_ref) domain_ref['enabled'] = False self.resource_api.update_domain(domain_ref['id'], domain_ref) self._assert_notify_sent(domain_ref['id'], 'disabled', 'domain', public=False) def test_disable_of_disabled_domain_does_not_notify(self): domain_ref = unit.new_domain_ref(enabled=False) self.resource_api.create_domain(domain_ref['id'], domain_ref) # The domain_ref above is not changed during the create process. We # can use the same ref to perform the update. 
self.resource_api.update_domain(domain_ref['id'], domain_ref) self._assert_notify_not_sent(domain_ref['id'], 'disabled', 'domain', public=False) def test_update_group(self): group_ref = unit.new_group_ref(domain_id=self.domain_id) group_ref = self.identity_api.create_group(group_ref) self.identity_api.update_group(group_ref['id'], group_ref) self._assert_last_note(group_ref['id'], UPDATED_OPERATION, 'group') self._assert_last_audit(group_ref['id'], UPDATED_OPERATION, 'group', cadftaxonomy.SECURITY_GROUP) def test_update_project(self): project_ref = unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(project_ref['id'], project_ref) self.resource_api.update_project(project_ref['id'], project_ref) self._assert_notify_sent( project_ref['id'], UPDATED_OPERATION, 'project', public=True) self._assert_last_audit(project_ref['id'], UPDATED_OPERATION, 'project', cadftaxonomy.SECURITY_PROJECT) def test_disable_project(self): project_ref = unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(project_ref['id'], project_ref) project_ref['enabled'] = False self.resource_api.update_project(project_ref['id'], project_ref) self._assert_notify_sent(project_ref['id'], 'disabled', 'project', public=False) def test_disable_of_disabled_project_does_not_notify(self): project_ref = unit.new_project_ref(domain_id=self.domain_id, enabled=False) self.resource_api.create_project(project_ref['id'], project_ref) # The project_ref above is not changed during the create process. We # can use the same ref to perform the update. 
self.resource_api.update_project(project_ref['id'], project_ref) self._assert_notify_not_sent(project_ref['id'], 'disabled', 'project', public=False) def test_update_project_does_not_send_disable(self): project_ref = unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(project_ref['id'], project_ref) project_ref['enabled'] = True self.resource_api.update_project(project_ref['id'], project_ref) self._assert_last_note( project_ref['id'], UPDATED_OPERATION, 'project') self._assert_notify_not_sent(project_ref['id'], 'disabled', 'project') def test_update_role(self): role_ref = unit.new_role_ref() self.role_api.create_role(role_ref['id'], role_ref) self.role_api.update_role(role_ref['id'], role_ref) self._assert_last_note(role_ref['id'], UPDATED_OPERATION, 'role') self._assert_last_audit(role_ref['id'], UPDATED_OPERATION, 'role', cadftaxonomy.SECURITY_ROLE) def test_update_user(self): user_ref = unit.new_user_ref(domain_id=self.domain_id) user_ref = self.identity_api.create_user(user_ref) self.identity_api.update_user(user_ref['id'], user_ref) self._assert_last_note(user_ref['id'], UPDATED_OPERATION, 'user') self._assert_last_audit(user_ref['id'], UPDATED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER) def test_config_option_no_events(self): self.config_fixture.config(notification_format='basic') role_ref = unit.new_role_ref() self.role_api.create_role(role_ref['id'], role_ref) # The regular notifications will still be emitted, since they are # used for callback handling. 
self._assert_last_note(role_ref['id'], CREATED_OPERATION, 'role') # No audit event should have occurred self.assertEqual(0, len(self._audits)) def test_add_user_to_group(self): user_ref = unit.new_user_ref(domain_id=self.domain_id) user_ref = self.identity_api.create_user(user_ref) group_ref = unit.new_group_ref(domain_id=self.domain_id) group_ref = self.identity_api.create_group(group_ref) self.identity_api.add_user_to_group(user_ref['id'], group_ref['id']) self._assert_last_note(group_ref['id'], UPDATED_OPERATION, 'group', actor_id=user_ref['id'], actor_type='user', actor_operation='added') def test_remove_user_from_group(self): user_ref = unit.new_user_ref(domain_id=self.domain_id) user_ref = self.identity_api.create_user(user_ref) group_ref = unit.new_group_ref(domain_id=self.domain_id) group_ref = self.identity_api.create_group(group_ref) self.identity_api.add_user_to_group(user_ref['id'], group_ref['id']) self.identity_api.remove_user_from_group(user_ref['id'], group_ref['id']) self._assert_last_note(group_ref['id'], UPDATED_OPERATION, 'group', actor_id=user_ref['id'], actor_type='user', actor_operation='removed') class CADFNotificationsForEntities(NotificationsForEntities): def setUp(self): super(CADFNotificationsForEntities, self).setUp() self.config_fixture.config(notification_format='cadf') def test_initiator_data_is_set(self): ref = unit.new_domain_ref() resp = self.post('/domains', body={'domain': ref}) resource_id = resp.result.get('domain').get('id') self._assert_last_audit(resource_id, CREATED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN) self._assert_initiator_data_is_set(CREATED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN) class V2Notifications(BaseNotificationTest): def setUp(self): super(V2Notifications, self).setUp() self.config_fixture.config(notification_format='cadf') def test_user(self): token = self.get_scoped_token() resp = self.admin_request( method='POST', path='/v2.0/users', body={ 'user': { 'name': uuid.uuid4().hex, 
'password': uuid.uuid4().hex, 'enabled': True, }, }, token=token, ) user_id = resp.result.get('user').get('id') self._assert_initiator_data_is_set(CREATED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER) # test for delete user self.admin_request( method='DELETE', path='/v2.0/users/%s' % user_id, token=token, ) self._assert_initiator_data_is_set(DELETED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER) def test_role(self): token = self.get_scoped_token() resp = self.admin_request( method='POST', path='/v2.0/OS-KSADM/roles', body={ 'role': { 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, }, }, token=token, ) role_id = resp.result.get('role').get('id') self._assert_initiator_data_is_set(CREATED_OPERATION, 'role', cadftaxonomy.SECURITY_ROLE) # test for delete role self.admin_request( method='DELETE', path='/v2.0/OS-KSADM/roles/%s' % role_id, token=token, ) self._assert_initiator_data_is_set(DELETED_OPERATION, 'role', cadftaxonomy.SECURITY_ROLE) def test_service_and_endpoint(self): token = self.get_scoped_token() resp = self.admin_request( method='POST', path='/v2.0/OS-KSADM/services', body={ 'OS-KSADM:service': { 'name': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'description': uuid.uuid4().hex, }, }, token=token, ) service_id = resp.result.get('OS-KSADM:service').get('id') self._assert_initiator_data_is_set(CREATED_OPERATION, 'service', cadftaxonomy.SECURITY_SERVICE) resp = self.admin_request( method='POST', path='/v2.0/endpoints', body={ 'endpoint': { 'region': uuid.uuid4().hex, 'service_id': service_id, 'publicurl': uuid.uuid4().hex, 'adminurl': uuid.uuid4().hex, 'internalurl': uuid.uuid4().hex, }, }, token=token, ) endpoint_id = resp.result.get('endpoint').get('id') self._assert_initiator_data_is_set(CREATED_OPERATION, 'endpoint', cadftaxonomy.SECURITY_ENDPOINT) # test for delete endpoint self.admin_request( method='DELETE', path='/v2.0/endpoints/%s' % endpoint_id, token=token, ) self._assert_initiator_data_is_set(DELETED_OPERATION, 
'endpoint', cadftaxonomy.SECURITY_ENDPOINT) # test for delete service self.admin_request( method='DELETE', path='/v2.0/OS-KSADM/services/%s' % service_id, token=token, ) self._assert_initiator_data_is_set(DELETED_OPERATION, 'service', cadftaxonomy.SECURITY_SERVICE) def test_project(self): token = self.get_scoped_token() resp = self.admin_request( method='POST', path='/v2.0/tenants', body={ 'tenant': { 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True }, }, token=token, ) project_id = resp.result.get('tenant').get('id') self._assert_initiator_data_is_set(CREATED_OPERATION, 'project', cadftaxonomy.SECURITY_PROJECT) # test for delete project self.admin_request( method='DELETE', path='/v2.0/tenants/%s' % project_id, token=token, ) self._assert_initiator_data_is_set(DELETED_OPERATION, 'project', cadftaxonomy.SECURITY_PROJECT) class TestEventCallbacks(test_v3.RestfulTestCase): def setUp(self): super(TestEventCallbacks, self).setUp() self.has_been_called = False def _project_deleted_callback(self, service, resource_type, operation, payload): self.has_been_called = True def _project_created_callback(self, service, resource_type, operation, payload): self.has_been_called = True def test_notification_received(self): callback = register_callback(CREATED_OPERATION, 'project') project_ref = unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(project_ref['id'], project_ref) self.assertTrue(callback.called) def test_notification_method_not_callable(self): fake_method = None self.assertRaises(TypeError, notifications.register_event_callback, UPDATED_OPERATION, 'project', [fake_method]) def test_notification_event_not_valid(self): self.assertRaises(ValueError, notifications.register_event_callback, uuid.uuid4().hex, 'project', self._project_deleted_callback) def test_event_registration_for_unknown_resource_type(self): # Registration for unknown resource types should succeed. 
If no event # is issued for that resource type, the callback wont be triggered. notifications.register_event_callback(DELETED_OPERATION, uuid.uuid4().hex, self._project_deleted_callback) resource_type = uuid.uuid4().hex notifications.register_event_callback(DELETED_OPERATION, resource_type, self._project_deleted_callback) def test_provider_event_callback_subscription(self): callback_called = [] @notifications.listener class Foo(object): def __init__(self): self.event_callbacks = { CREATED_OPERATION: {'project': self.foo_callback}} def foo_callback(self, service, resource_type, operation, payload): # uses callback_called from the closure callback_called.append(True) Foo() project_ref = unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(project_ref['id'], project_ref) self.assertEqual([True], callback_called) def test_provider_event_callbacks_subscription(self): callback_called = [] @notifications.listener class Foo(object): def __init__(self): self.event_callbacks = { CREATED_OPERATION: { 'project': [self.callback_0, self.callback_1]}} def callback_0(self, service, resource_type, operation, payload): # uses callback_called from the closure callback_called.append('cb0') def callback_1(self, service, resource_type, operation, payload): # uses callback_called from the closure callback_called.append('cb1') Foo() project_ref = unit.new_project_ref(domain_id=self.domain_id) self.resource_api.create_project(project_ref['id'], project_ref) self.assertItemsEqual(['cb1', 'cb0'], callback_called) def test_invalid_event_callbacks(self): @notifications.listener class Foo(object): def __init__(self): self.event_callbacks = 'bogus' self.assertRaises(AttributeError, Foo) def test_invalid_event_callbacks_event(self): @notifications.listener class Foo(object): def __init__(self): self.event_callbacks = {CREATED_OPERATION: 'bogus'} self.assertRaises(AttributeError, Foo) def test_using_an_unbound_method_as_a_callback_fails(self): # NOTE(dstanek): An unbound 
method is when you reference a method # from a class object. You'll get a method that isn't bound to a # particular instance so there is no magic 'self'. You can call it, # but you have to pass in the instance manually like: C.m(C()). # If you reference the method from an instance then you get a method # that effectively curries the self argument for you # (think functools.partial). Obviously is we don't have an # instance then we can't call the method. @notifications.listener class Foo(object): def __init__(self): self.event_callbacks = {CREATED_OPERATION: {'project': Foo.callback}} def callback(self, *args): pass # TODO(dstanek): it would probably be nice to fail early using # something like: # self.assertRaises(TypeError, Foo) Foo() project_ref = unit.new_project_ref(domain_id=self.domain_id) self.assertRaises(TypeError, self.resource_api.create_project, project_ref['id'], project_ref) class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase): LOCAL_HOST = 'localhost' ACTION = 'authenticate' ROLE_ASSIGNMENT = 'role_assignment' def setUp(self): super(CadfNotificationsWrapperTestCase, self).setUp() self._notifications = [] def fake_notify(action, initiator, outcome, target, event_type, **kwargs): service_security = cadftaxonomy.SERVICE_SECURITY event = eventfactory.EventFactory().new_event( eventType=cadftype.EVENTTYPE_ACTIVITY, outcome=outcome, action=action, initiator=initiator, target=target, observer=cadfresource.Resource(typeURI=service_security)) for key, value in kwargs.items(): setattr(event, key, value) note = { 'action': action, 'initiator': initiator, 'event': event, 'event_type': event_type, 'send_notification_called': True} self._notifications.append(note) self.useFixture(mockpatch.PatchObject( notifications, '_send_audit_notification', fake_notify)) def _assert_last_note(self, action, user_id, event_type=None): self.assertTrue(self._notifications) note = self._notifications[-1] self.assertEqual(action, note['action']) initiator = 
note['initiator'] self.assertEqual(user_id, initiator.id) self.assertEqual(self.LOCAL_HOST, initiator.host.address) self.assertTrue(note['send_notification_called']) if event_type: self.assertEqual(event_type, note['event_type']) def _assert_event(self, role_id, project=None, domain=None, user=None, group=None, inherit=False): """Assert that the CADF event is valid. In the case of role assignments, the event will have extra data, specifically, the role, target, actor, and if the role is inherited. An example event, as a dictionary is seen below: { 'typeURI': 'http://schemas.dmtf.org/cloud/audit/1.0/event', 'initiator': { 'typeURI': 'service/security/account/user', 'host': {'address': 'localhost'}, 'id': 'openstack:0a90d95d-582c-4efb-9cbc-e2ca7ca9c341', 'name': u'bccc2d9bfc2a46fd9e33bcf82f0b5c21' }, 'target': { 'typeURI': 'service/security/account/user', 'id': 'openstack:d48ea485-ef70-4f65-8d2b-01aa9d7ec12d' }, 'observer': { 'typeURI': 'service/security', 'id': 'openstack:d51dd870-d929-4aba-8d75-dcd7555a0c95' }, 'eventType': 'activity', 'eventTime': '2014-08-21T21:04:56.204536+0000', 'role': u'0e6b990380154a2599ce6b6e91548a68', 'domain': u'24bdcff1aab8474895dbaac509793de1', 'inherited_to_projects': False, 'group': u'c1e22dc67cbd469ea0e33bf428fe597a', 'action': 'created.role_assignment', 'outcome': 'success', 'id': 'openstack:782689dd-f428-4f13-99c7-5c70f94a5ac1' } """ note = self._notifications[-1] event = note['event'] if project: self.assertEqual(project, event.project) if domain: self.assertEqual(domain, event.domain) if group: self.assertEqual(group, event.group) elif user: self.assertEqual(user, event.user) self.assertEqual(role_id, event.role) self.assertEqual(inherit, event.inherited_to_projects) def test_v3_authenticate_user_name_and_domain_id(self): user_id = self.user_id user_name = self.user['name'] password = self.user['password'] domain_id = self.domain_id data = self.build_authentication_request(username=user_name, user_domain_id=domain_id, 
password=password) self.post('/auth/tokens', body=data) self._assert_last_note(self.ACTION, user_id) def test_v3_authenticate_user_id(self): user_id = self.user_id password = self.user['password'] data = self.build_authentication_request(user_id=user_id, password=password) self.post('/auth/tokens', body=data) self._assert_last_note(self.ACTION, user_id) def test_v3_authenticate_user_name_and_domain_name(self): user_id = self.user_id user_name = self.user['name'] password = self.user['password'] domain_name = self.domain['name'] data = self.build_authentication_request(username=user_name, user_domain_name=domain_name, password=password) self.post('/auth/tokens', body=data) self._assert_last_note(self.ACTION, user_id) def _test_role_assignment(self, url, role, project=None, domain=None, user=None, group=None): self.put(url) action = "%s.%s" % (CREATED_OPERATION, self.ROLE_ASSIGNMENT) event_type = '%s.%s.%s' % (notifications.SERVICE, self.ROLE_ASSIGNMENT, CREATED_OPERATION) self._assert_last_note(action, self.user_id, event_type) self._assert_event(role, project, domain, user, group) self.delete(url) action = "%s.%s" % (DELETED_OPERATION, self.ROLE_ASSIGNMENT) event_type = '%s.%s.%s' % (notifications.SERVICE, self.ROLE_ASSIGNMENT, DELETED_OPERATION) self._assert_last_note(action, self.user_id, event_type) self._assert_event(role, project, domain, user, None) def test_user_project_grant(self): url = ('/projects/%s/users/%s/roles/%s' % (self.project_id, self.user_id, self.role_id)) self._test_role_assignment(url, self.role_id, project=self.project_id, user=self.user_id) def test_group_domain_grant(self): group_ref = unit.new_group_ref(domain_id=self.domain_id) group = self.identity_api.create_group(group_ref) self.identity_api.add_user_to_group(self.user_id, group['id']) url = ('/domains/%s/groups/%s/roles/%s' % (self.domain_id, group['id'], self.role_id)) self._test_role_assignment(url, self.role_id, domain=self.domain_id, user=self.user_id, group=group['id']) def 
test_add_role_to_user_and_project(self): # A notification is sent when add_role_to_user_and_project is called on # the assignment manager. project_ref = unit.new_project_ref(self.domain_id) project = self.resource_api.create_project( project_ref['id'], project_ref) tenant_id = project['id'] self.assignment_api.add_role_to_user_and_project( self.user_id, tenant_id, self.role_id) self.assertTrue(self._notifications) note = self._notifications[-1] self.assertEqual('created.role_assignment', note['action']) self.assertTrue(note['send_notification_called']) self._assert_event(self.role_id, project=tenant_id, user=self.user_id) def test_remove_role_from_user_and_project(self): # A notification is sent when remove_role_from_user_and_project is # called on the assignment manager. self.assignment_api.remove_role_from_user_and_project( self.user_id, self.project_id, self.role_id) self.assertTrue(self._notifications) note = self._notifications[-1] self.assertEqual('deleted.role_assignment', note['action']) self.assertTrue(note['send_notification_called']) self._assert_event(self.role_id, project=self.project_id, user=self.user_id) class TestCallbackRegistration(unit.BaseTestCase): def setUp(self): super(TestCallbackRegistration, self).setUp() self.mock_log = mock.Mock() # Force the callback logging to occur self.mock_log.logger.getEffectiveLevel.return_value = logging.DEBUG def verify_log_message(self, data): """Verify log message. Tests that use this are a little brittle because adding more logging can break them. 
TODO(dstanek): remove the need for this in a future refactoring """ log_fn = self.mock_log.debug self.assertEqual(len(data), log_fn.call_count) for datum in data: log_fn.assert_any_call(mock.ANY, datum) def test_a_function_callback(self): def callback(*args, **kwargs): pass resource_type = 'thing' with mock.patch('keystone.notifications.LOG', self.mock_log): notifications.register_event_callback( CREATED_OPERATION, resource_type, callback) callback = 'keystone.tests.unit.common.test_notifications.callback' expected_log_data = { 'callback': callback, 'event': 'identity.%s.created' % resource_type } self.verify_log_message([expected_log_data]) def test_a_method_callback(self): class C(object): def callback(self, *args, **kwargs): pass with mock.patch('keystone.notifications.LOG', self.mock_log): notifications.register_event_callback( CREATED_OPERATION, 'thing', C().callback) callback = 'keystone.tests.unit.common.test_notifications.C.callback' expected_log_data = { 'callback': callback, 'event': 'identity.thing.created' } self.verify_log_message([expected_log_data]) def test_a_list_of_callbacks(self): def callback(*args, **kwargs): pass class C(object): def callback(self, *args, **kwargs): pass with mock.patch('keystone.notifications.LOG', self.mock_log): notifications.register_event_callback( CREATED_OPERATION, 'thing', [callback, C().callback]) callback_1 = 'keystone.tests.unit.common.test_notifications.callback' callback_2 = 'keystone.tests.unit.common.test_notifications.C.callback' expected_log_data = [ { 'callback': callback_1, 'event': 'identity.thing.created' }, { 'callback': callback_2, 'event': 'identity.thing.created' }, ] self.verify_log_message(expected_log_data) def test_an_invalid_callback(self): self.assertRaises(TypeError, notifications.register_event_callback, (CREATED_OPERATION, 'thing', object())) def test_an_invalid_event(self): def callback(*args, **kwargs): pass self.assertRaises(ValueError, notifications.register_event_callback, 
uuid.uuid4().hex, 'thing', callback) keystone-9.0.0/keystone/tests/unit/common/test_ldap.py0000664000567000056710000005545312701407102024333 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile import uuid import fixtures import ldap.dn import mock from oslo_config import cfg from testtools import matchers from keystone.common import driver_hints from keystone.common import ldap as ks_ldap from keystone.common.ldap import core as common_ldap_core from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import fakeldap from keystone.tests.unit.ksfixtures import database CONF = cfg.CONF class DnCompareTest(unit.BaseTestCase): """Tests for the DN comparison functions in keystone.common.ldap.core.""" def test_prep(self): # prep_case_insensitive returns the string with spaces at the front and # end if it's already lowercase and no insignificant characters. value = 'lowercase value' self.assertEqual(value, ks_ldap.prep_case_insensitive(value)) def test_prep_lowercase(self): # prep_case_insensitive returns the string with spaces at the front and # end and lowercases the value. value = 'UPPERCASE VALUE' exp_value = value.lower() self.assertEqual(exp_value, ks_ldap.prep_case_insensitive(value)) def test_prep_insignificant(self): # prep_case_insensitive remove insignificant spaces. 
value = 'before after' exp_value = 'before after' self.assertEqual(exp_value, ks_ldap.prep_case_insensitive(value)) def test_prep_insignificant_pre_post(self): # prep_case_insensitive remove insignificant spaces. value = ' value ' exp_value = 'value' self.assertEqual(exp_value, ks_ldap.prep_case_insensitive(value)) def test_ava_equal_same(self): # is_ava_value_equal returns True if the two values are the same. value = 'val1' self.assertTrue(ks_ldap.is_ava_value_equal('cn', value, value)) def test_ava_equal_complex(self): # is_ava_value_equal returns True if the two values are the same using # a value that's got different capitalization and insignificant chars. val1 = 'before after' val2 = ' BEFORE afTer ' self.assertTrue(ks_ldap.is_ava_value_equal('cn', val1, val2)) def test_ava_different(self): # is_ava_value_equal returns False if the values aren't the same. self.assertFalse(ks_ldap.is_ava_value_equal('cn', 'val1', 'val2')) def test_rdn_same(self): # is_rdn_equal returns True if the two values are the same. rdn = ldap.dn.str2dn('cn=val1')[0] self.assertTrue(ks_ldap.is_rdn_equal(rdn, rdn)) def test_rdn_diff_length(self): # is_rdn_equal returns False if the RDNs have a different number of # AVAs. rdn1 = ldap.dn.str2dn('cn=cn1')[0] rdn2 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] self.assertFalse(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_multi_ava_same_order(self): # is_rdn_equal returns True if the RDNs have the same number of AVAs # and the values are the same. 
rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] rdn2 = ldap.dn.str2dn('cn=CN1+ou=OU1')[0] self.assertTrue(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_multi_ava_diff_order(self): # is_rdn_equal returns True if the RDNs have the same number of AVAs # and the values are the same, even if in a different order rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] rdn2 = ldap.dn.str2dn('ou=OU1+cn=CN1')[0] self.assertTrue(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_multi_ava_diff_type(self): # is_rdn_equal returns False if the RDNs have the same number of AVAs # and the attribute types are different. rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] rdn2 = ldap.dn.str2dn('cn=cn1+sn=sn1')[0] self.assertFalse(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_attr_type_case_diff(self): # is_rdn_equal returns True for same RDNs even when attr type case is # different. rdn1 = ldap.dn.str2dn('cn=cn1')[0] rdn2 = ldap.dn.str2dn('CN=cn1')[0] self.assertTrue(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_attr_type_alias(self): # is_rdn_equal returns False for same RDNs even when attr type alias is # used. Note that this is a limitation since an LDAP server should # consider them equal. rdn1 = ldap.dn.str2dn('cn=cn1')[0] rdn2 = ldap.dn.str2dn('2.5.4.3=cn1')[0] self.assertFalse(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_dn_same(self): # is_dn_equal returns True if the DNs are the same. dn = 'cn=Babs Jansen,ou=OpenStack' self.assertTrue(ks_ldap.is_dn_equal(dn, dn)) def test_dn_equal_unicode(self): # is_dn_equal can accept unicode dn = u'cn=fäké,ou=OpenStack' self.assertTrue(ks_ldap.is_dn_equal(dn, dn)) def test_dn_diff_length(self): # is_dn_equal returns False if the DNs don't have the same number of # RDNs dn1 = 'cn=Babs Jansen,ou=OpenStack' dn2 = 'cn=Babs Jansen,ou=OpenStack,dc=example.com' self.assertFalse(ks_ldap.is_dn_equal(dn1, dn2)) def test_dn_equal_rdns(self): # is_dn_equal returns True if the DNs have the same number of RDNs # and each RDN is the same. 
dn1 = 'cn=Babs Jansen,ou=OpenStack+cn=OpenSource' dn2 = 'CN=Babs Jansen,cn=OpenSource+ou=OpenStack' self.assertTrue(ks_ldap.is_dn_equal(dn1, dn2)) def test_dn_parsed_dns(self): # is_dn_equal can also accept parsed DNs. dn_str1 = ldap.dn.str2dn('cn=Babs Jansen,ou=OpenStack+cn=OpenSource') dn_str2 = ldap.dn.str2dn('CN=Babs Jansen,cn=OpenSource+ou=OpenStack') self.assertTrue(ks_ldap.is_dn_equal(dn_str1, dn_str2)) def test_startswith_under_child(self): # dn_startswith returns True if descendant_dn is a child of dn. child = 'cn=Babs Jansen,ou=OpenStack' parent = 'ou=OpenStack' self.assertTrue(ks_ldap.dn_startswith(child, parent)) def test_startswith_parent(self): # dn_startswith returns False if descendant_dn is a parent of dn. child = 'cn=Babs Jansen,ou=OpenStack' parent = 'ou=OpenStack' self.assertFalse(ks_ldap.dn_startswith(parent, child)) def test_startswith_same(self): # dn_startswith returns False if DNs are the same. dn = 'cn=Babs Jansen,ou=OpenStack' self.assertFalse(ks_ldap.dn_startswith(dn, dn)) def test_startswith_not_parent(self): # dn_startswith returns False if descendant_dn is not under the dn child = 'cn=Babs Jansen,ou=OpenStack' parent = 'dc=example.com' self.assertFalse(ks_ldap.dn_startswith(child, parent)) def test_startswith_descendant(self): # dn_startswith returns True if descendant_dn is a descendant of dn. descendant = 'cn=Babs Jansen,ou=Keystone,ou=OpenStack,dc=example.com' dn = 'ou=OpenStack,dc=example.com' self.assertTrue(ks_ldap.dn_startswith(descendant, dn)) descendant = 'uid=12345,ou=Users,dc=example,dc=com' dn = 'ou=Users,dc=example,dc=com' self.assertTrue(ks_ldap.dn_startswith(descendant, dn)) def test_startswith_parsed_dns(self): # dn_startswith also accepts parsed DNs. descendant = ldap.dn.str2dn('cn=Babs Jansen,ou=OpenStack') dn = ldap.dn.str2dn('ou=OpenStack') self.assertTrue(ks_ldap.dn_startswith(descendant, dn)) def test_startswith_unicode(self): # dn_startswith accepts unicode. 
child = u'cn=fäké,ou=OpenStäck' parent = u'ou=OpenStäck' self.assertTrue(ks_ldap.dn_startswith(child, parent)) class LDAPDeleteTreeTest(unit.TestCase): def setUp(self): super(LDAPDeleteTreeTest, self).setUp() ks_ldap.register_handler('fake://', fakeldap.FakeLdapNoSubtreeDelete) self.useFixture(database.Database(self.sql_driver_version_overrides)) self.load_backends() self.load_fixtures(default_fixtures) self.addCleanup(self.clear_database) self.addCleanup(common_ldap_core._HANDLERS.clear) def clear_database(self): for shelf in fakeldap.FakeShelves: fakeldap.FakeShelves[shelf].clear() def config_overrides(self): super(LDAPDeleteTreeTest, self).config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = super(LDAPDeleteTreeTest, self).config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files def test_delete_tree(self): """Test manually deleting a tree. Few LDAP servers support CONTROL_DELETETREE. This test exercises the alternate code paths in BaseLdap.delete_tree. 
""" conn = self.identity_api.user.get_connection() id_attr = self.identity_api.user.id_attr objclass = self.identity_api.user.object_class.lower() tree_dn = self.identity_api.user.tree_dn def create_entry(name, parent_dn=None): if not parent_dn: parent_dn = tree_dn dn = '%s=%s,%s' % (id_attr, name, parent_dn) attrs = [('objectclass', [objclass, 'ldapsubentry']), (id_attr, [name])] conn.add_s(dn, attrs) return dn # create 3 entries like this: # cn=base # cn=child,cn=base # cn=grandchild,cn=child,cn=base # then attempt to delete_tree(cn=base) base_id = 'base' base_dn = create_entry(base_id) child_dn = create_entry('child', base_dn) grandchild_dn = create_entry('grandchild', child_dn) # verify that the three entries were created scope = ldap.SCOPE_SUBTREE filt = '(|(objectclass=*)(objectclass=ldapsubentry))' entries = conn.search_s(base_dn, scope, filt, attrlist=common_ldap_core.DN_ONLY) self.assertThat(entries, matchers.HasLength(3)) sort_ents = sorted([e[0] for e in entries], key=len, reverse=True) self.assertEqual([grandchild_dn, child_dn, base_dn], sort_ents) # verify that a non-leaf node can't be deleted directly by the # LDAP server self.assertRaises(ldap.NOT_ALLOWED_ON_NONLEAF, conn.delete_s, base_dn) self.assertRaises(ldap.NOT_ALLOWED_ON_NONLEAF, conn.delete_s, child_dn) # call our delete_tree implementation self.identity_api.user.delete_tree(base_id) self.assertRaises(ldap.NO_SUCH_OBJECT, conn.search_s, base_dn, ldap.SCOPE_BASE) self.assertRaises(ldap.NO_SUCH_OBJECT, conn.search_s, child_dn, ldap.SCOPE_BASE) self.assertRaises(ldap.NO_SUCH_OBJECT, conn.search_s, grandchild_dn, ldap.SCOPE_BASE) class MultiURLTests(unit.TestCase): """Tests for setting multiple LDAP URLs.""" def test_multiple_urls_with_comma_no_conn_pool(self): urls = 'ldap://localhost,ldap://backup.localhost' self.config_fixture.config(group='ldap', url=urls, use_pool=False) base_ldap = ks_ldap.BaseLdap(CONF) ldap_connection = base_ldap.get_connection() self.assertEqual(urls, 
                         ldap_connection.conn.conn._uri)

    def test_multiple_urls_with_comma_with_conn_pool(self):
        # Same as above, but the pooled connection must carry the full
        # comma-separated URI list.
        urls = 'ldap://localhost,ldap://backup.localhost'
        self.config_fixture.config(group='ldap', url=urls, use_pool=True)
        base_ldap = ks_ldap.BaseLdap(CONF)
        ldap_connection = base_ldap.get_connection()
        self.assertEqual(urls,
                         ldap_connection.conn.conn_pool.uri)


class SslTlsTest(unit.TestCase):
    """Tests for the SSL/TLS functionality in keystone.common.ldap.core."""

    @mock.patch.object(ks_ldap.core.KeystoneLDAPHandler, 'simple_bind_s')
    @mock.patch.object(ldap.ldapobject.LDAPObject, 'start_tls_s')
    def _init_ldap_connection(self, config, mock_ldap_one, mock_ldap_two):
        # Attempt to connect to initialize python-ldap.
        # bind/start_tls are mocked out, so no real server is contacted;
        # only the TLS option side effects on the ldap module are exercised.
        base_ldap = ks_ldap.BaseLdap(config)
        base_ldap.get_connection()

    def test_certfile_trust_tls(self):
        # We need this to actually exist, so we create a tempfile.
        (handle, certfile) = tempfile.mkstemp()
        self.addCleanup(os.unlink, certfile)
        self.addCleanup(os.close, handle)
        self.config_fixture.config(group='ldap',
                                   url='ldap://localhost',
                                   use_tls=True,
                                   tls_cacertfile=certfile)

        self._init_ldap_connection(CONF)

        # Ensure the cert trust option is set.
        self.assertEqual(certfile, ldap.get_option(ldap.OPT_X_TLS_CACERTFILE))

    def test_certdir_trust_tls(self):
        # We need this to actually exist, so we create a tempdir.
        certdir = self.useFixture(fixtures.TempDir()).path
        self.config_fixture.config(group='ldap',
                                   url='ldap://localhost',
                                   use_tls=True,
                                   tls_cacertdir=certdir)

        self._init_ldap_connection(CONF)

        # Ensure the cert trust option is set.
        self.assertEqual(certdir, ldap.get_option(ldap.OPT_X_TLS_CACERTDIR))

    def test_certfile_trust_ldaps(self):
        # ldaps:// scheme with use_tls=False must still honour the CA cert
        # file option.
        # We need this to actually exist, so we create a tempfile.
        (handle, certfile) = tempfile.mkstemp()
        self.addCleanup(os.unlink, certfile)
        self.addCleanup(os.close, handle)
        self.config_fixture.config(group='ldap',
                                   url='ldaps://localhost',
                                   use_tls=False,
                                   tls_cacertfile=certfile)

        self._init_ldap_connection(CONF)

        # Ensure the cert trust option is set.
        self.assertEqual(certfile, ldap.get_option(ldap.OPT_X_TLS_CACERTFILE))

    def test_certdir_trust_ldaps(self):
        # We need this to actually exist, so we create a tempdir.
        certdir = self.useFixture(fixtures.TempDir()).path
        self.config_fixture.config(group='ldap',
                                   url='ldaps://localhost',
                                   use_tls=False,
                                   tls_cacertdir=certdir)

        self._init_ldap_connection(CONF)

        # Ensure the cert trust option is set.
        self.assertEqual(certdir, ldap.get_option(ldap.OPT_X_TLS_CACERTDIR))


class LDAPPagedResultsTest(unit.TestCase):
    """Tests the paged results functionality in keystone.common.ldap.core."""

    def setUp(self):
        super(LDAPPagedResultsTest, self).setUp()
        self.clear_database()

        ks_ldap.register_handler('fake://', fakeldap.FakeLdap)
        self.addCleanup(common_ldap_core._HANDLERS.clear)
        self.useFixture(database.Database(self.sql_driver_version_overrides))

        self.load_backends()
        self.load_fixtures(default_fixtures)

    def clear_database(self):
        # Wipe every fake-LDAP shelf so state does not leak between tests.
        for shelf in fakeldap.FakeShelves:
            fakeldap.FakeShelves[shelf].clear()

    def config_overrides(self):
        super(LDAPPagedResultsTest, self).config_overrides()
        self.config_fixture.config(group='identity', driver='ldap')

    def config_files(self):
        config_files = super(LDAPPagedResultsTest, self).config_files()
        config_files.append(unit.dirs.tests_conf('backend_ldap.conf'))
        return config_files

    @mock.patch.object(fakeldap.FakeLdap, 'search_ext')
    @mock.patch.object(fakeldap.FakeLdap, 'result3')
    def test_paged_results_control_api(self, mock_result3, mock_search_ext):
        # With page_size=1 a subtree search must go through the paged
        # search_ext/result3 API rather than plain search_s.
        mock_result3.return_value = ('', [], 1, [])

        self.config_fixture.config(group='ldap',
                                   page_size=1)

        conn = self.identity_api.user.get_connection()
        conn._paged_search_s('dc=example,dc=test',
                             ldap.SCOPE_SUBTREE,
                             'objectclass=*')


class CommonLdapTestCase(unit.BaseTestCase):
    """These test cases call functions in keystone.common.ldap."""

    def test_binary_attribute_values(self):
        # A value that cannot be decoded as UTF-8 must be dropped from the
        # converted result rather than propagated.
        result = [(
            'cn=junk,dc=example,dc=com',
            {
                'cn': ['junk'],
                'sn': [uuid.uuid4().hex],
                'mail': [uuid.uuid4().hex],
                'binary_attr': ['\x00\xFF\x00\xFF']
            }
), ] py_result = ks_ldap.convert_ldap_result(result) # The attribute containing the binary value should # not be present in the converted result. self.assertNotIn('binary_attr', py_result[0][1]) def test_utf8_conversion(self): value_unicode = u'fäké1' value_utf8 = value_unicode.encode('utf-8') result_utf8 = ks_ldap.utf8_encode(value_unicode) self.assertEqual(value_utf8, result_utf8) result_utf8 = ks_ldap.utf8_encode(value_utf8) self.assertEqual(value_utf8, result_utf8) result_unicode = ks_ldap.utf8_decode(value_utf8) self.assertEqual(value_unicode, result_unicode) result_unicode = ks_ldap.utf8_decode(value_unicode) self.assertEqual(value_unicode, result_unicode) self.assertRaises(TypeError, ks_ldap.utf8_encode, 100) result_unicode = ks_ldap.utf8_decode(100) self.assertEqual(u'100', result_unicode) def test_user_id_begins_with_0(self): user_id = '0123456' result = [( 'cn=dummy,dc=example,dc=com', { 'user_id': [user_id], 'enabled': ['TRUE'] } ), ] py_result = ks_ldap.convert_ldap_result(result) # The user id should be 0123456, and the enabled # flag should be True self.assertIs(py_result[0][1]['enabled'][0], True) self.assertEqual(user_id, py_result[0][1]['user_id'][0]) def test_user_id_begins_with_0_and_enabled_bit_mask(self): user_id = '0123456' bitmask = '225' expected_bitmask = 225 result = [( 'cn=dummy,dc=example,dc=com', { 'user_id': [user_id], 'enabled': [bitmask] } ), ] py_result = ks_ldap.convert_ldap_result(result) # The user id should be 0123456, and the enabled # flag should be 225 self.assertEqual(expected_bitmask, py_result[0][1]['enabled'][0]) self.assertEqual(user_id, py_result[0][1]['user_id'][0]) def test_user_id_and_bitmask_begins_with_0(self): user_id = '0123456' bitmask = '0225' expected_bitmask = 225 result = [( 'cn=dummy,dc=example,dc=com', { 'user_id': [user_id], 'enabled': [bitmask] } ), ] py_result = ks_ldap.convert_ldap_result(result) # The user id should be 0123456, and the enabled # flag should be 225, the 0 is dropped. 
self.assertEqual(expected_bitmask, py_result[0][1]['enabled'][0]) self.assertEqual(user_id, py_result[0][1]['user_id'][0]) def test_user_id_and_user_name_with_boolean_string(self): boolean_strings = ['TRUE', 'FALSE', 'true', 'false', 'True', 'False', 'TrUe' 'FaLse'] for user_name in boolean_strings: user_id = uuid.uuid4().hex result = [( 'cn=dummy,dc=example,dc=com', { 'user_id': [user_id], 'user_name': [user_name] } ), ] py_result = ks_ldap.convert_ldap_result(result) # The user name should still be a string value. self.assertEqual(user_name, py_result[0][1]['user_name'][0]) class LDAPFilterQueryCompositionTest(unit.TestCase): """These test cases test LDAP filter generation.""" def setUp(self): super(LDAPFilterQueryCompositionTest, self).setUp() self.base_ldap = ks_ldap.BaseLdap(self.config_fixture.conf) # The tests need an attribute mapping to use. self.attribute_name = uuid.uuid4().hex self.filter_attribute_name = uuid.uuid4().hex self.base_ldap.attribute_mapping = { self.attribute_name: self.filter_attribute_name } def test_return_query_with_no_hints(self): hints = driver_hints.Hints() # NOTE: doesn't have to be a real query, we just need to make sure the # same string is returned if there are no hints. 
query = uuid.uuid4().hex self.assertEqual(query, self.base_ldap.filter_query(hints=hints, query=query)) # make sure the default query is an empty string self.assertEqual('', self.base_ldap.filter_query(hints=hints)) def test_filter_with_empty_query_and_hints_set(self): hints = driver_hints.Hints() username = uuid.uuid4().hex hints.add_filter(name=self.attribute_name, value=username, comparator='equals', case_sensitive=False) expected_ldap_filter = '(&(%s=%s))' % ( self.filter_attribute_name, username) self.assertEqual(expected_ldap_filter, self.base_ldap.filter_query(hints=hints)) def test_filter_with_both_query_and_hints_set(self): hints = driver_hints.Hints() # NOTE: doesn't have to be a real query, we just need to make sure the # filter string is concatenated correctly query = uuid.uuid4().hex username = uuid.uuid4().hex expected_result = '(&%(query)s(%(user_name_attr)s=%(username)s))' % ( {'query': query, 'user_name_attr': self.filter_attribute_name, 'username': username}) hints.add_filter(self.attribute_name, username) self.assertEqual(expected_result, self.base_ldap.filter_query(hints=hints, query=query)) def test_filter_with_hints_and_query_is_none(self): hints = driver_hints.Hints() username = uuid.uuid4().hex hints.add_filter(name=self.attribute_name, value=username, comparator='equals', case_sensitive=False) expected_ldap_filter = '(&(%s=%s))' % ( self.filter_attribute_name, username) self.assertEqual(expected_ldap_filter, self.base_ldap.filter_query(hints=hints, query=None)) keystone-9.0.0/keystone/tests/unit/test_v3_trust.py0000664000567000056710000004056512701407105023715 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import uuid

from six.moves import http_client

from keystone.tests import unit
from keystone.tests.unit import test_v3


class TestTrustOperations(test_v3.RestfulTestCase):
    """Test module for create, read, update and delete operations on trusts.

    This module is specific to tests for trust CRUD operations. All other
    tests related to trusts that are authentication or authorization specific
    should live in the keystone/tests/unit/test_v3_auth.py module.

    """

    def setUp(self):
        super(TestTrustOperations, self).setUp()
        # create a trustee to delegate stuff to
        self.trustee_user = unit.create_user(self.identity_api,
                                             domain_id=self.domain_id)
        self.trustee_user_id = self.trustee_user['id']

    def test_create_trust_bad_request(self):
        # The server returns a 403 Forbidden rather than a 400 Bad Request, see
        # bug 1133435
        self.post('/OS-TRUST/trusts', body={'trust': {}},
                  expected_status=http_client.FORBIDDEN)

    def test_trust_crud(self):
        # create a new trust
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.project_id,
            role_ids=[self.role_id])
        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
        trust = self.assertValidTrustResponse(r, ref)

        # get the trust
        r = self.get(
            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']})
        self.assertValidTrustResponse(r, ref)

        # validate roles on the trust
        r = self.get(
            '/OS-TRUST/trusts/%(trust_id)s/roles' % {
                'trust_id': trust['id']})
        roles = self.assertValidRoleListResponse(r, self.role)
        self.assertIn(self.role['id'], [x['id'] for x in roles])
        self.head(
            '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
                'trust_id': trust['id'],
                'role_id': self.role['id']},
            expected_status=http_client.OK)
        r = self.get(
            '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
                'trust_id': trust['id'],
                'role_id': self.role['id']})
        self.assertValidRoleResponse(r, self.role)

        # list all trusts
        r = self.get('/OS-TRUST/trusts')
        self.assertValidTrustListResponse(r, trust)

        # trusts are immutable
        self.patch(
            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
            body={'trust': ref},
            expected_status=http_client.NOT_FOUND)

        # delete the trust
        self.delete(
            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']})

        # ensure the trust is not found
        self.get(
            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
            expected_status=http_client.NOT_FOUND)

    def test_list_trusts(self):
        # create three trusts with the same trustor and trustee
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.project_id,
            impersonation=False,
            expires=dict(minutes=1),
            role_ids=[self.role_id])
        for i in range(3):
            ref['expires_at'] = datetime.datetime.utcnow().replace(
                year=2032).strftime(unit.TIME_FORMAT)
            r = self.post('/OS-TRUST/trusts', body={'trust': ref})
            self.assertValidTrustResponse(r, ref)

        # list all trusts
        r = self.get('/OS-TRUST/trusts')
        trusts = r.result['trusts']
        self.assertEqual(3, len(trusts))
        self.assertValidTrustListResponse(r)

        # list all trusts for the trustor
        r = self.get('/OS-TRUST/trusts?trustor_user_id=%s' %
                     self.user_id)
        trusts = r.result['trusts']
        self.assertEqual(3, len(trusts))
        self.assertValidTrustListResponse(r)

        # list all trusts as the trustor as the trustee.
        # Filtering by trustee_user_id with the trustor's own id must match
        # nothing, since the trustor is not a trustee of these trusts.
        r = self.get('/OS-TRUST/trusts?trustee_user_id=%s' %
                     self.user_id)
        trusts = r.result['trusts']
        self.assertEqual(0, len(trusts))

        # list all trusts as the trustee is forbidden
        r = self.get('/OS-TRUST/trusts?trustee_user_id=%s' %
                     self.trustee_user_id,
                     expected_status=http_client.FORBIDDEN)

    def test_delete_trust(self):
        # create a trust
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.project_id,
            impersonation=False,
            expires=dict(minutes=1),
            role_ids=[self.role_id])
        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
        trust = self.assertValidTrustResponse(r, ref)

        # delete the trust
        self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
            'trust_id': trust['id']})

        # ensure the trust isn't found
        self.get('/OS-TRUST/trusts/%(trust_id)s' % {
            'trust_id': trust['id']},
            expected_status=http_client.NOT_FOUND)

    def test_create_trust_without_trustee_returns_bad_request(self):
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.project_id,
            role_ids=[self.role_id])

        # trustee_user_id is required to create a trust
        del ref['trustee_user_id']

        self.post('/OS-TRUST/trusts',
                  body={'trust': ref},
                  expected_status=http_client.BAD_REQUEST)

    def test_create_trust_without_impersonation_returns_bad_request(self):
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.project_id,
            role_ids=[self.role_id])

        # impersonation is required to create a trust
        del ref['impersonation']

        self.post('/OS-TRUST/trusts',
                  body={'trust': ref},
                  expected_status=http_client.BAD_REQUEST)

    def test_create_trust_with_bad_remaining_uses_returns_bad_request(self):
        # negative numbers, strings, non-integers, and 0 are not valid values
        for value in [-1, 0, "a bad value", 7.2]:
            ref = unit.new_trust_ref(
                trustor_user_id=self.user_id,
                trustee_user_id=self.trustee_user_id,
                project_id=self.project_id,
                remaining_uses=value,
                role_ids=[self.role_id])
            self.post('/OS-TRUST/trusts',
                      body={'trust': ref},
                      expected_status=http_client.BAD_REQUEST)

    def test_create_trust_with_non_existant_trustee_returns_not_found(self):
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=uuid.uuid4().hex,
            project_id=self.project_id,
            role_ids=[self.role_id])
        self.post('/OS-TRUST/trusts', body={'trust': ref},
                  expected_status=http_client.NOT_FOUND)

    def test_create_trust_with_trustee_as_trustor_returns_forbidden(self):
        ref = unit.new_trust_ref(
            trustor_user_id=self.trustee_user_id,
            trustee_user_id=self.user_id,
            project_id=self.project_id,
            role_ids=[self.role_id])
        # NOTE(lbragstad): This fails because the user making the request isn't
        # the trustor defined in the request.
        self.post('/OS-TRUST/trusts', body={'trust': ref},
                  expected_status=http_client.FORBIDDEN)

    def test_create_trust_with_non_existant_project_returns_not_found(self):
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=uuid.uuid4().hex,
            role_ids=[self.role_id])
        self.post('/OS-TRUST/trusts', body={'trust': ref},
                  expected_status=http_client.NOT_FOUND)

    def test_create_trust_with_non_existant_role_id_returns_not_found(self):
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.project_id,
            role_ids=[uuid.uuid4().hex])
        self.post('/OS-TRUST/trusts', body={'trust': ref},
                  expected_status=http_client.NOT_FOUND)

    def test_create_trust_with_non_existant_role_name_returns_not_found(self):
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.project_id,
            role_names=[uuid.uuid4().hex])
        self.post('/OS-TRUST/trusts', body={'trust': ref},
                  expected_status=http_client.NOT_FOUND)

    def test_validate_trust_scoped_token_against_v2_returns_unauthorized(self):
        # create a new trust
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.default_domain_user_id,
            project_id=self.project_id,
            impersonation=False,
            expires=dict(minutes=1),
            role_ids=[self.role_id])
        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
        trust = self.assertValidTrustResponse(r)

        # get a v3 trust-scoped token as the trustee
        auth_data = self.build_authentication_request(
            user_id=self.default_domain_user['id'],
            password=self.default_domain_user['password'],
            trust_id=trust['id'])
        r = self.v3_create_token(auth_data)
        self.assertValidProjectScopedTokenResponse(
            r, self.default_domain_user)
        token = r.headers.get('X-Subject-Token')

        # now validate the v3 token with v2 API
        path = '/v2.0/tokens/%s' % (token)
        self.admin_request(
            path=path, token=self.get_admin_token(),
            method='GET', expected_status=http_client.UNAUTHORIZED)

    def test_v3_v2_intermix_trustor_not_in_default_domain_failed(self):
        # get a project-scoped token
        auth_data = self.build_authentication_request(
            user_id=self.default_domain_user['id'],
            password=self.default_domain_user['password'],
            project_id=self.default_domain_project_id)
        token = self.get_requested_token(auth_data)

        # create a new trust
        ref = unit.new_trust_ref(
            trustor_user_id=self.default_domain_user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.default_domain_project_id,
            impersonation=False,
            expires=dict(minutes=1),
            role_ids=[self.role_id])
        r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
        trust = self.assertValidTrustResponse(r)

        # get a trust-scoped token as the trustee
        auth_data = self.build_authentication_request(
            user_id=self.trustee_user['id'],
            password=self.trustee_user['password'],
            trust_id=trust['id'])
        r = self.v3_create_token(auth_data)
        self.assertValidProjectScopedTokenResponse(
            r, self.trustee_user)
        token = r.headers.get('X-Subject-Token')

        # now validate the v3 token with v2 API
        path = '/v2.0/tokens/%s' % (token)
        self.admin_request(
            path=path, token=self.get_admin_token(),
            method='GET', expected_status=http_client.UNAUTHORIZED)

    def test_v3_v2_intermix_project_not_in_default_domain_failed(self):
        # create a trustee in default domain to delegate stuff to
        trustee_user = unit.create_user(self.identity_api,
                                        domain_id=test_v3.DEFAULT_DOMAIN_ID)
        trustee_user_id = trustee_user['id']

        # create a new trust
        ref = unit.new_trust_ref(
            trustor_user_id=self.default_domain_user_id,
            trustee_user_id=trustee_user_id,
            project_id=self.project_id,
            impersonation=False,
            expires=dict(minutes=1),
            role_ids=[self.role_id])

        # get a project-scoped token as the default_domain_user
        auth_data = self.build_authentication_request(
            user_id=self.default_domain_user['id'],
            password=self.default_domain_user['password'],
            project_id=self.default_domain_project_id)
        token = self.get_requested_token(auth_data)

        r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
        trust = self.assertValidTrustResponse(r)

        # get a trust-scoped token as the trustee
        auth_data = self.build_authentication_request(
            user_id=trustee_user['id'],
            password=trustee_user['password'],
            trust_id=trust['id'])
        r = self.v3_create_token(auth_data)
        self.assertValidProjectScopedTokenResponse(r, trustee_user)
        token = r.headers.get('X-Subject-Token')

        # ensure the token is invalid against v2
        path = '/v2.0/tokens/%s' % (token)
        self.admin_request(
            path=path, token=self.get_admin_token(),
            method='GET', expected_status=http_client.UNAUTHORIZED)

    def test_exercise_trust_scoped_token_without_impersonation(self):
        # create a new trust
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.project_id,
            impersonation=False,
            expires=dict(minutes=1),
            role_ids=[self.role_id])
        resp = self.post('/OS-TRUST/trusts', body={'trust': ref})
        trust = self.assertValidTrustResponse(resp)

        # get a trust-scoped token as the trustee
        auth_data = self.build_authentication_request(
            user_id=self.trustee_user['id'],
            password=self.trustee_user['password'],
            trust_id=trust['id'])
        resp = self.v3_create_token(auth_data)
        resp_body = resp.json_body['token']

        # without impersonation the token is issued as the trustee itself
        self.assertValidProjectScopedTokenResponse(resp,
                                                   self.trustee_user)
self.assertEqual(self.trustee_user['id'], resp_body['user']['id']) self.assertEqual(self.trustee_user['name'], resp_body['user']['name']) self.assertEqual(self.domain['id'], resp_body['user']['domain']['id']) self.assertEqual(self.domain['name'], resp_body['user']['domain']['name']) self.assertEqual(self.project['id'], resp_body['project']['id']) self.assertEqual(self.project['name'], resp_body['project']['name']) def test_exercise_trust_scoped_token_with_impersonation(self): # create a new trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id]) resp = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(resp) # get a trust-scoped token as the trustee auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) resp = self.v3_create_token(auth_data) resp_body = resp.json_body['token'] self.assertValidProjectScopedTokenResponse(resp, self.user) self.assertEqual(self.user['id'], resp_body['user']['id']) self.assertEqual(self.user['name'], resp_body['user']['name']) self.assertEqual(self.domain['id'], resp_body['user']['domain']['id']) self.assertEqual(self.domain['name'], resp_body['user']['domain']['name']) self.assertEqual(self.project['id'], resp_body['project']['id']) self.assertEqual(self.project['name'], resp_body['project']['name']) keystone-9.0.0/keystone/tests/unit/test_versions.py0000664000567000056710000012757212701407102023775 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import functools
import random

import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import http_client
from testtools import matchers as tt_matchers
import webob

from keystone.common import json_home
from keystone.tests import unit
from keystone.tests.unit import utils
from keystone.version import controllers

CONF = cfg.CONF

# Expected media-type entries for the v2.0 version document.
v2_MEDIA_TYPES = [
    {
        "base": "application/json",
        "type": "application/"
                "vnd.openstack.identity-v2.0+json"
    }
]

v2_HTML_DESCRIPTION = {
    "rel": "describedby",
    "type": "text/html",
    "href": "http://docs.openstack.org/"
}

v2_EXPECTED_RESPONSE = {
    "id": "v2.0",
    "status": "stable",
    "updated": "2014-04-17T00:00:00Z",
    "links": [
        {
            "rel": "self",
            "href": "",     # Will get filled in after initialization
        },
        v2_HTML_DESCRIPTION
    ],
    "media-types": v2_MEDIA_TYPES
}

v2_VERSION_RESPONSE = {
    "version": v2_EXPECTED_RESPONSE
}

# Expected media-type entries for the v3 version document.
v3_MEDIA_TYPES = [
    {
        "base": "application/json",
        "type": "application/"
                "vnd.openstack.identity-v3+json"
    }
]

v3_EXPECTED_RESPONSE = {
    "id": "v3.6",
    "status": "stable",
    "updated": "2016-04-04T00:00:00Z",
    "links": [
        {
            "rel": "self",
            "href": "",     # Will get filled in after initialization
        }
    ],
    "media-types": v3_MEDIA_TYPES
}

v3_VERSION_RESPONSE = {
    "version": v3_EXPECTED_RESPONSE
}

VERSIONS_RESPONSE = {
    "versions": {
        "values": [
            v3_EXPECTED_RESPONSE,
            v2_EXPECTED_RESPONSE
        ]
    }
}

# Partials that build JSON-Home relation URIs for the various API
# extensions; only the resource/parameter name varies per call site.
_build_ec2tokens_relation = functools.partial(
    json_home.build_v3_extension_resource_relation, extension_name='OS-EC2',
    extension_version='1.0')

REVOCATIONS_RELATION = json_home.build_v3_extension_resource_relation(
    'OS-PKI', '1.0', 'revocations')

_build_simple_cert_relation = functools.partial(
    json_home.build_v3_extension_resource_relation,
    extension_name='OS-SIMPLE-CERT', extension_version='1.0')

_build_trust_relation = functools.partial(
    json_home.build_v3_extension_resource_relation, extension_name='OS-TRUST',
    extension_version='1.0')

_build_federation_rel = functools.partial(
    json_home.build_v3_extension_resource_relation,
    extension_name='OS-FEDERATION',
    extension_version='1.0')

_build_oauth1_rel = functools.partial(
    json_home.build_v3_extension_resource_relation,
    extension_name='OS-OAUTH1', extension_version='1.0')

_build_ep_policy_rel = functools.partial(
    json_home.build_v3_extension_resource_relation,
    extension_name='OS-ENDPOINT-POLICY', extension_version='1.0')

_build_ep_filter_rel = functools.partial(
    json_home.build_v3_extension_resource_relation,
    extension_name='OS-EP-FILTER', extension_version='1.0')

_build_os_inherit_rel = functools.partial(
    json_home.build_v3_extension_resource_relation,
    extension_name='OS-INHERIT',
    extension_version='1.0')

# Parameter relations referenced by the JSON-Home href-vars below.
TRUST_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation(
    'OS-TRUST', '1.0', 'trust_id')

IDP_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation(
    'OS-FEDERATION', '1.0', 'idp_id')

PROTOCOL_ID_PARAM_RELATION = json_home.build_v3_extension_parameter_relation(
    'OS-FEDERATION', '1.0', 'protocol_id')

MAPPING_ID_PARAM_RELATION = json_home.build_v3_extension_parameter_relation(
    'OS-FEDERATION', '1.0', 'mapping_id')

SP_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation(
    'OS-FEDERATION', '1.0', 'sp_id')

CONSUMER_ID_PARAMETER_RELATION = (
    json_home.build_v3_extension_parameter_relation(
        'OS-OAUTH1', '1.0', 'consumer_id'))

REQUEST_TOKEN_ID_PARAMETER_RELATION = (
    json_home.build_v3_extension_parameter_relation(
        'OS-OAUTH1', '1.0', 'request_token_id'))

ACCESS_TOKEN_ID_PARAMETER_RELATION = (
    json_home.build_v3_extension_parameter_relation(
        'OS-OAUTH1', '1.0',
'access_token_id')) ENDPOINT_GROUP_ID_PARAMETER_RELATION = ( json_home.build_v3_extension_parameter_relation( 'OS-EP-FILTER', '1.0', 'endpoint_group_id')) BASE_IDP_PROTOCOL = '/OS-FEDERATION/identity_providers/{idp_id}/protocols' BASE_EP_POLICY = '/policies/{policy_id}/OS-ENDPOINT-POLICY' BASE_EP_FILTER_PREFIX = '/OS-EP-FILTER' BASE_EP_FILTER = BASE_EP_FILTER_PREFIX + '/endpoint_groups/{endpoint_group_id}' BASE_ACCESS_TOKEN = ( '/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}') FEDERATED_AUTH_URL = ('/OS-FEDERATION/identity_providers/{idp_id}' '/protocols/{protocol_id}/auth') FEDERATED_IDP_SPECIFIC_WEBSSO = ('/auth/OS-FEDERATION/identity_providers/' '{idp_id}/protocols/{protocol_id}/websso') V3_JSON_HOME_RESOURCES = { json_home.build_v3_resource_relation('auth_tokens'): { 'href': '/auth/tokens'}, json_home.build_v3_resource_relation('auth_catalog'): { 'href': '/auth/catalog'}, json_home.build_v3_resource_relation('auth_projects'): { 'href': '/auth/projects'}, json_home.build_v3_resource_relation('auth_domains'): { 'href': '/auth/domains'}, json_home.build_v3_resource_relation('credential'): { 'href-template': '/credentials/{credential_id}', 'href-vars': { 'credential_id': json_home.build_v3_parameter_relation('credential_id')}}, json_home.build_v3_resource_relation('credentials'): { 'href': '/credentials'}, json_home.build_v3_resource_relation('domain'): { 'href-template': '/domains/{domain_id}', 'href-vars': {'domain_id': json_home.Parameters.DOMAIN_ID, }}, json_home.build_v3_resource_relation('domain_group_role'): { 'href-template': '/domains/{domain_id}/groups/{group_id}/roles/{role_id}', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }}, json_home.build_v3_resource_relation('domain_group_roles'): { 'href-template': '/domains/{domain_id}/groups/{group_id}/roles', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': 
json_home.Parameters.GROUP_ID}}, json_home.build_v3_resource_relation('domain_user_role'): { 'href-template': '/domains/{domain_id}/users/{user_id}/roles/{role_id}', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }}, json_home.build_v3_resource_relation('domain_user_roles'): { 'href-template': '/domains/{domain_id}/users/{user_id}/roles', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'user_id': json_home.Parameters.USER_ID, }}, json_home.build_v3_resource_relation('domains'): {'href': '/domains'}, json_home.build_v3_resource_relation('endpoint'): { 'href-template': '/endpoints/{endpoint_id}', 'href-vars': { 'endpoint_id': json_home.build_v3_parameter_relation('endpoint_id'), }}, json_home.build_v3_resource_relation('endpoints'): { 'href': '/endpoints'}, _build_ec2tokens_relation(resource_name='ec2tokens'): { 'href': '/ec2tokens'}, _build_ec2tokens_relation(resource_name='user_credential'): { 'href-template': '/users/{user_id}/credentials/OS-EC2/{credential_id}', 'href-vars': { 'credential_id': json_home.build_v3_parameter_relation('credential_id'), 'user_id': json_home.Parameters.USER_ID, }}, _build_ec2tokens_relation(resource_name='user_credentials'): { 'href-template': '/users/{user_id}/credentials/OS-EC2', 'href-vars': { 'user_id': json_home.Parameters.USER_ID, }}, REVOCATIONS_RELATION: { 'href': '/auth/tokens/OS-PKI/revoked'}, 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-REVOKE/1.0/rel/' 'events': { 'href': '/OS-REVOKE/events'}, _build_simple_cert_relation(resource_name='ca_certificate'): { 'href': '/OS-SIMPLE-CERT/ca'}, _build_simple_cert_relation(resource_name='certificates'): { 'href': '/OS-SIMPLE-CERT/certificates'}, _build_trust_relation(resource_name='trust'): { 'href-template': '/OS-TRUST/trusts/{trust_id}', 'href-vars': {'trust_id': TRUST_ID_PARAMETER_RELATION, }}, _build_trust_relation(resource_name='trust_role'): { 
'href-template': '/OS-TRUST/trusts/{trust_id}/roles/{role_id}', 'href-vars': { 'role_id': json_home.Parameters.ROLE_ID, 'trust_id': TRUST_ID_PARAMETER_RELATION, }}, _build_trust_relation(resource_name='trust_roles'): { 'href-template': '/OS-TRUST/trusts/{trust_id}/roles', 'href-vars': {'trust_id': TRUST_ID_PARAMETER_RELATION, }}, _build_trust_relation(resource_name='trusts'): { 'href': '/OS-TRUST/trusts'}, 'http://docs.openstack.org/api/openstack-identity/3/ext/s3tokens/1.0/rel/' 's3tokens': { 'href': '/s3tokens'}, json_home.build_v3_resource_relation('group'): { 'href-template': '/groups/{group_id}', 'href-vars': { 'group_id': json_home.Parameters.GROUP_ID, }}, json_home.build_v3_resource_relation('group_user'): { 'href-template': '/groups/{group_id}/users/{user_id}', 'href-vars': { 'group_id': json_home.Parameters.GROUP_ID, 'user_id': json_home.Parameters.USER_ID, }}, json_home.build_v3_resource_relation('group_users'): { 'href-template': '/groups/{group_id}/users', 'href-vars': {'group_id': json_home.Parameters.GROUP_ID, }}, json_home.build_v3_resource_relation('groups'): {'href': '/groups'}, json_home.build_v3_resource_relation('policies'): { 'href': '/policies'}, json_home.build_v3_resource_relation('policy'): { 'href-template': '/policies/{policy_id}', 'href-vars': { 'policy_id': json_home.build_v3_parameter_relation('policy_id'), }}, json_home.build_v3_resource_relation('project'): { 'href-template': '/projects/{project_id}', 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, }}, json_home.build_v3_resource_relation('project_group_role'): { 'href-template': '/projects/{project_id}/groups/{group_id}/roles/{role_id}', 'href-vars': { 'group_id': json_home.Parameters.GROUP_ID, 'project_id': json_home.Parameters.PROJECT_ID, 'role_id': json_home.Parameters.ROLE_ID, }}, json_home.build_v3_resource_relation('project_group_roles'): { 'href-template': '/projects/{project_id}/groups/{group_id}/roles', 'href-vars': { 'group_id': json_home.Parameters.GROUP_ID, 
'project_id': json_home.Parameters.PROJECT_ID, }}, json_home.build_v3_resource_relation('project_user_role'): { 'href-template': '/projects/{project_id}/users/{user_id}/roles/{role_id}', 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }}, json_home.build_v3_resource_relation('project_user_roles'): { 'href-template': '/projects/{project_id}/users/{user_id}/roles', 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, 'user_id': json_home.Parameters.USER_ID, }}, json_home.build_v3_resource_relation('projects'): { 'href': '/projects'}, json_home.build_v3_resource_relation('region'): { 'href-template': '/regions/{region_id}', 'href-vars': { 'region_id': json_home.build_v3_parameter_relation('region_id'), }}, json_home.build_v3_resource_relation('regions'): {'href': '/regions'}, json_home.build_v3_resource_relation('role'): { 'href-template': '/roles/{role_id}', 'href-vars': { 'role_id': json_home.Parameters.ROLE_ID, }}, json_home.build_v3_resource_relation('implied_roles'): { 'href-template': '/roles/{prior_role_id}/implies', 'href-vars': { 'prior_role_id': json_home.Parameters.ROLE_ID}, 'hints': {'status': 'experimental'}}, json_home.build_v3_resource_relation('implied_role'): { 'href-template': '/roles/{prior_role_id}/implies/{implied_role_id}', 'href-vars': { 'prior_role_id': json_home.Parameters.ROLE_ID, 'implied_role_id': json_home.Parameters.ROLE_ID, }, 'hints': {'status': 'experimental'}}, json_home.build_v3_resource_relation('role_inferences'): { 'href': '/role_inferences', 'hints': {'status': 'experimental'}}, json_home.build_v3_resource_relation('role_assignments'): { 'href': '/role_assignments'}, json_home.build_v3_resource_relation('roles'): {'href': '/roles'}, json_home.build_v3_resource_relation('service'): { 'href-template': '/services/{service_id}', 'href-vars': { 'service_id': json_home.build_v3_parameter_relation('service_id')}}, 
json_home.build_v3_resource_relation('services'): { 'href': '/services'}, json_home.build_v3_resource_relation('user'): { 'href-template': '/users/{user_id}', 'href-vars': { 'user_id': json_home.Parameters.USER_ID, }}, json_home.build_v3_resource_relation('user_change_password'): { 'href-template': '/users/{user_id}/password', 'href-vars': {'user_id': json_home.Parameters.USER_ID, }}, json_home.build_v3_resource_relation('user_groups'): { 'href-template': '/users/{user_id}/groups', 'href-vars': {'user_id': json_home.Parameters.USER_ID, }}, json_home.build_v3_resource_relation('user_projects'): { 'href-template': '/users/{user_id}/projects', 'href-vars': {'user_id': json_home.Parameters.USER_ID, }}, json_home.build_v3_resource_relation('users'): {'href': '/users'}, _build_federation_rel(resource_name='domains'): { 'href': '/auth/domains'}, _build_federation_rel(resource_name='websso'): { 'href-template': '/auth/OS-FEDERATION/websso/{protocol_id}', 'href-vars': { 'protocol_id': PROTOCOL_ID_PARAM_RELATION, }}, _build_federation_rel(resource_name='projects'): { 'href': '/auth/projects'}, _build_federation_rel(resource_name='saml2'): { 'href': '/auth/OS-FEDERATION/saml2'}, _build_federation_rel(resource_name='ecp'): { 'href': '/auth/OS-FEDERATION/saml2/ecp'}, _build_federation_rel(resource_name='metadata'): { 'href': '/OS-FEDERATION/saml2/metadata'}, _build_federation_rel(resource_name='identity_providers'): { 'href': '/OS-FEDERATION/identity_providers'}, _build_federation_rel(resource_name='service_providers'): { 'href': '/OS-FEDERATION/service_providers'}, _build_federation_rel(resource_name='mappings'): { 'href': '/OS-FEDERATION/mappings'}, _build_federation_rel(resource_name='identity_provider'): { 'href-template': '/OS-FEDERATION/identity_providers/{idp_id}', 'href-vars': {'idp_id': IDP_ID_PARAMETER_RELATION, }}, _build_federation_rel(resource_name='identity_providers'): { 'href-template': FEDERATED_IDP_SPECIFIC_WEBSSO, 'href-vars': { 'idp_id': 
IDP_ID_PARAMETER_RELATION, 'protocol_id': PROTOCOL_ID_PARAM_RELATION, }}, _build_federation_rel(resource_name='service_provider'): { 'href-template': '/OS-FEDERATION/service_providers/{sp_id}', 'href-vars': {'sp_id': SP_ID_PARAMETER_RELATION, }}, _build_federation_rel(resource_name='mapping'): { 'href-template': '/OS-FEDERATION/mappings/{mapping_id}', 'href-vars': {'mapping_id': MAPPING_ID_PARAM_RELATION, }}, _build_federation_rel(resource_name='identity_provider_protocol'): { 'href-template': BASE_IDP_PROTOCOL + '/{protocol_id}', 'href-vars': { 'idp_id': IDP_ID_PARAMETER_RELATION, 'protocol_id': PROTOCOL_ID_PARAM_RELATION, }}, _build_federation_rel(resource_name='identity_provider_protocols'): { 'href-template': BASE_IDP_PROTOCOL, 'href-vars': { 'idp_id': IDP_ID_PARAMETER_RELATION}}, _build_federation_rel(resource_name='identity_provider_protocol_auth'): { 'href-template': FEDERATED_AUTH_URL, 'href-vars': { 'idp_id': IDP_ID_PARAMETER_RELATION, 'protocol_id': PROTOCOL_ID_PARAM_RELATION, }}, _build_oauth1_rel(resource_name='access_tokens'): { 'href': '/OS-OAUTH1/access_token'}, _build_oauth1_rel(resource_name='request_tokens'): { 'href': '/OS-OAUTH1/request_token'}, _build_oauth1_rel(resource_name='consumers'): { 'href': '/OS-OAUTH1/consumers'}, _build_oauth1_rel(resource_name='authorize_request_token'): { 'href-template': '/OS-OAUTH1/authorize/{request_token_id}', 'href-vars': {'request_token_id': REQUEST_TOKEN_ID_PARAMETER_RELATION, }}, _build_oauth1_rel(resource_name='consumer'): { 'href-template': '/OS-OAUTH1/consumers/{consumer_id}', 'href-vars': {'consumer_id': CONSUMER_ID_PARAMETER_RELATION, }}, _build_oauth1_rel(resource_name='user_access_token'): { 'href-template': BASE_ACCESS_TOKEN, 'href-vars': {'user_id': json_home.Parameters.USER_ID, 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, }}, _build_oauth1_rel(resource_name='user_access_tokens'): { 'href-template': '/users/{user_id}/OS-OAUTH1/access_tokens', 'href-vars': {'user_id': 
json_home.Parameters.USER_ID, }}, _build_oauth1_rel(resource_name='user_access_token_role'): { 'href-template': BASE_ACCESS_TOKEN + '/roles/{role_id}', 'href-vars': {'user_id': json_home.Parameters.USER_ID, 'role_id': json_home.Parameters.ROLE_ID, 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, }}, _build_oauth1_rel(resource_name='user_access_token_roles'): { 'href-template': BASE_ACCESS_TOKEN + '/roles', 'href-vars': {'user_id': json_home.Parameters.USER_ID, 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, }}, _build_ep_policy_rel(resource_name='endpoint_policy'): { 'href-template': '/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy', 'href-vars': {'endpoint_id': json_home.Parameters.ENDPOINT_ID, }}, _build_ep_policy_rel(resource_name='endpoint_policy_association'): { 'href-template': BASE_EP_POLICY + '/endpoints/{endpoint_id}', 'href-vars': {'endpoint_id': json_home.Parameters.ENDPOINT_ID, 'policy_id': json_home.Parameters.POLICY_ID, }}, _build_ep_policy_rel(resource_name='policy_endpoints'): { 'href-template': BASE_EP_POLICY + '/endpoints', 'href-vars': {'policy_id': json_home.Parameters.POLICY_ID, }}, _build_ep_policy_rel( resource_name='region_and_service_policy_association'): { 'href-template': (BASE_EP_POLICY + '/services/{service_id}/regions/{region_id}'), 'href-vars': {'policy_id': json_home.Parameters.POLICY_ID, 'service_id': json_home.Parameters.SERVICE_ID, 'region_id': json_home.Parameters.REGION_ID, }}, _build_ep_policy_rel(resource_name='service_policy_association'): { 'href-template': BASE_EP_POLICY + '/services/{service_id}', 'href-vars': {'policy_id': json_home.Parameters.POLICY_ID, 'service_id': json_home.Parameters.SERVICE_ID, }}, _build_ep_filter_rel(resource_name='endpoint_group'): { 'href-template': '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}', 'href-vars': {'endpoint_group_id': ENDPOINT_GROUP_ID_PARAMETER_RELATION, }}, _build_ep_filter_rel( resource_name='endpoint_group_to_project_association'): { 'href-template': 
BASE_EP_FILTER + '/projects/{project_id}', 'href-vars': {'endpoint_group_id': ENDPOINT_GROUP_ID_PARAMETER_RELATION, 'project_id': json_home.Parameters.PROJECT_ID, }}, _build_ep_filter_rel(resource_name='endpoint_groups'): {'href': '/OS-EP-FILTER/endpoint_groups'}, _build_ep_filter_rel(resource_name='endpoint_projects'): { 'href-template': '/OS-EP-FILTER/endpoints/{endpoint_id}/projects', 'href-vars': {'endpoint_id': json_home.Parameters.ENDPOINT_ID, }}, _build_ep_filter_rel(resource_name='endpoints_in_endpoint_group'): { 'href-template': BASE_EP_FILTER + '/endpoints', 'href-vars': {'endpoint_group_id': ENDPOINT_GROUP_ID_PARAMETER_RELATION, }}, _build_ep_filter_rel(resource_name='project_endpoint_groups'): { 'href-template': (BASE_EP_FILTER_PREFIX + '/projects/{project_id}' + '/endpoint_groups'), 'href-vars': {'project_id': json_home.Parameters.PROJECT_ID, }}, _build_ep_filter_rel(resource_name='project_endpoint'): { 'href-template': ('/OS-EP-FILTER/projects/{project_id}' '/endpoints/{endpoint_id}'), 'href-vars': {'endpoint_id': json_home.Parameters.ENDPOINT_ID, 'project_id': json_home.Parameters.PROJECT_ID, }}, _build_ep_filter_rel(resource_name='project_endpoints'): { 'href-template': '/OS-EP-FILTER/projects/{project_id}/endpoints', 'href-vars': {'project_id': json_home.Parameters.PROJECT_ID, }}, _build_ep_filter_rel( resource_name='projects_associated_with_endpoint_group'): { 'href-template': BASE_EP_FILTER + '/projects', 'href-vars': {'endpoint_group_id': ENDPOINT_GROUP_ID_PARAMETER_RELATION, }}, _build_os_inherit_rel( resource_name='domain_user_role_inherited_to_projects'): { 'href-template': '/OS-INHERIT/domains/{domain_id}/users/' '{user_id}/roles/{role_id}/inherited_to_projects', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }}, _build_os_inherit_rel( resource_name='domain_group_role_inherited_to_projects'): { 'href-template': 
'/OS-INHERIT/domains/{domain_id}/groups/' '{group_id}/roles/{role_id}/inherited_to_projects', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }}, _build_os_inherit_rel( resource_name='domain_user_roles_inherited_to_projects'): { 'href-template': '/OS-INHERIT/domains/{domain_id}/users/' '{user_id}/roles/inherited_to_projects', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'user_id': json_home.Parameters.USER_ID, }}, _build_os_inherit_rel( resource_name='domain_group_roles_inherited_to_projects'): { 'href-template': '/OS-INHERIT/domains/{domain_id}/groups/' '{group_id}/roles/inherited_to_projects', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, }}, _build_os_inherit_rel( resource_name='project_user_role_inherited_to_projects'): { 'href-template': '/OS-INHERIT/projects/{project_id}/users/' '{user_id}/roles/{role_id}/inherited_to_projects', 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }}, _build_os_inherit_rel( resource_name='project_group_role_inherited_to_projects'): { 'href-template': '/OS-INHERIT/projects/{project_id}/groups/' '{group_id}/roles/{role_id}/inherited_to_projects', 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }}, json_home.build_v3_resource_relation('domain_config'): { 'href-template': '/domains/{domain_id}/config', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID}, 'hints': {'status': 'experimental'}}, json_home.build_v3_resource_relation('domain_config_group'): { 'href-template': '/domains/{domain_id}/config/{group}', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'group': json_home.build_v3_parameter_relation('config_group')}, 'hints': {'status': 'experimental'}}, 
json_home.build_v3_resource_relation('domain_config_option'): { 'href-template': '/domains/{domain_id}/config/{group}/{option}', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'group': json_home.build_v3_parameter_relation('config_group'), 'option': json_home.build_v3_parameter_relation('config_option')}, 'hints': {'status': 'experimental'}}, json_home.build_v3_resource_relation('domain_config_default'): { 'href': '/domains/config/default', 'hints': {'status': 'experimental'}}, json_home.build_v3_resource_relation('domain_config_default_group'): { 'href-template': '/domains/config/{group}/default', 'href-vars': { 'group': json_home.build_v3_parameter_relation('config_group')}, 'hints': {'status': 'experimental'}}, json_home.build_v3_resource_relation('domain_config_default_option'): { 'href-template': '/domains/config/{group}/{option}/default', 'href-vars': { 'group': json_home.build_v3_parameter_relation('config_group'), 'option': json_home.build_v3_parameter_relation('config_option')}, 'hints': {'status': 'experimental'}}, } class TestClient(object): def __init__(self, app=None, token=None): self.app = app self.token = token def request(self, method, path, headers=None, body=None): if headers is None: headers = {} if self.token: headers.setdefault('X-Auth-Token', self.token) req = webob.Request.blank(path) req.method = method for k, v in headers.items(): req.headers[k] = v if body: req.body = body return req.get_response(self.app) def get(self, path, headers=None): return self.request('GET', path=path, headers=headers) def post(self, path, headers=None, body=None): return self.request('POST', path=path, headers=headers, body=body) def put(self, path, headers=None, body=None): return self.request('PUT', path=path, headers=headers, body=body) class _VersionsEqual(tt_matchers.MatchesListwise): def __init__(self, expected): super(_VersionsEqual, self).__init__([ tt_matchers.KeysEqual(expected), tt_matchers.KeysEqual(expected['versions']), 
class _VersionsEqual(tt_matchers.MatchesListwise):
    """Matcher comparing two version-discovery documents.

    Checks the top-level keys, the keys under ``versions``, and that the
    ``values`` lists have the same length and contents
    (order-insensitive).
    """

    def __init__(self, expected):
        super(_VersionsEqual, self).__init__([
            tt_matchers.KeysEqual(expected),
            tt_matchers.KeysEqual(expected['versions']),
            tt_matchers.HasLength(len(expected['versions']['values'])),
            tt_matchers.ContainsAll(expected['versions']['values']),
        ])

    def match(self, other):
        # 'values' is listed twice to line up with the two matchers built
        # above (length check and containment check).
        return super(_VersionsEqual, self).match([
            other,
            other['versions'],
            other['versions']['values'],
            other['versions']['values'],
        ])


class VersionTestCase(unit.TestCase):
    """Version discovery tests for the public and admin applications."""

    def setUp(self):
        super(VersionTestCase, self).setUp()
        self.load_backends()
        self.public_app = self.loadapp('keystone', 'main')
        self.admin_app = self.loadapp('keystone', 'admin')
        self.config_fixture.config(
            public_endpoint='http://localhost:%(public_port)d',
            admin_endpoint='http://localhost:%(admin_port)d')

    def config_overrides(self):
        super(VersionTestCase, self).config_overrides()
        # Random, non-overlapping ranges so the admin and public ports
        # cannot collide.
        admin_port = random.randint(10000, 30000)
        public_port = random.randint(40000, 60000)
        self.config_fixture.config(group='eventlet_server',
                                   public_port=public_port,
                                   admin_port=admin_port)

    def _paste_in_port(self, response, port):
        # Rewrite the 'self' link of a version entry to the given URL.
        for link in response['links']:
            if link['rel'] == 'self':
                link['href'] = port

    def test_public_versions(self):
        client = TestClient(self.public_app)
        resp = client.get('/')
        self.assertEqual(300, resp.status_int)
        data = jsonutils.loads(resp.body)
        # Deep-copy the module-level template so _paste_in_port does not
        # mutate state shared with other tests.
        expected = copy.deepcopy(VERSIONS_RESPONSE)
        for version in expected['versions']['values']:
            if version['id'].startswith('v3'):
                self._paste_in_port(
                    version, 'http://localhost:%s/v3/' %
                    CONF.eventlet_server.public_port)
            elif version['id'] == 'v2.0':
                self._paste_in_port(
                    version, 'http://localhost:%s/v2.0/' %
                    CONF.eventlet_server.public_port)
        self.assertThat(data, _VersionsEqual(expected))

    def test_admin_versions(self):
        client = TestClient(self.admin_app)
        resp = client.get('/')
        self.assertEqual(300, resp.status_int)
        data = jsonutils.loads(resp.body)
        # Copy the shared template before mutating it (see above).
        expected = copy.deepcopy(VERSIONS_RESPONSE)
        for version in expected['versions']['values']:
            if version['id'].startswith('v3'):
                self._paste_in_port(
                    version, 'http://localhost:%s/v3/' %
                    CONF.eventlet_server.admin_port)
            elif version['id'] == 'v2.0':
                self._paste_in_port(
                    version, 'http://localhost:%s/v2.0/' %
                    CONF.eventlet_server.admin_port)
        self.assertThat(data, _VersionsEqual(expected))

    def test_use_site_url_if_endpoint_unset(self):
        self.config_fixture.config(public_endpoint=None, admin_endpoint=None)
        for app in (self.public_app, self.admin_app):
            client = TestClient(app)
            resp = client.get('/')
            self.assertEqual(300, resp.status_int)
            data = jsonutils.loads(resp.body)
            expected = copy.deepcopy(VERSIONS_RESPONSE)
            for version in expected['versions']['values']:
                # localhost happens to be the site url for tests
                if version['id'].startswith('v3'):
                    self._paste_in_port(version, 'http://localhost/v3/')
                elif version['id'] == 'v2.0':
                    self._paste_in_port(version, 'http://localhost/v2.0/')
            self.assertThat(data, _VersionsEqual(expected))

    def test_public_version_v2(self):
        client = TestClient(self.public_app)
        resp = client.get('/v2.0/')
        self.assertEqual(http_client.OK, resp.status_int)
        data = jsonutils.loads(resp.body)
        expected = copy.deepcopy(v2_VERSION_RESPONSE)
        self._paste_in_port(expected['version'],
                            'http://localhost:%s/v2.0/' %
                            CONF.eventlet_server.public_port)
        self.assertEqual(expected, data)

    def test_admin_version_v2(self):
        client = TestClient(self.admin_app)
        resp = client.get('/v2.0/')
        self.assertEqual(http_client.OK, resp.status_int)
        data = jsonutils.loads(resp.body)
        expected = copy.deepcopy(v2_VERSION_RESPONSE)
        self._paste_in_port(expected['version'],
                            'http://localhost:%s/v2.0/' %
                            CONF.eventlet_server.admin_port)
        self.assertEqual(expected, data)

    def test_use_site_url_if_endpoint_unset_v2(self):
        self.config_fixture.config(public_endpoint=None, admin_endpoint=None)
        for app in (self.public_app, self.admin_app):
            client = TestClient(app)
            resp = client.get('/v2.0/')
            self.assertEqual(http_client.OK, resp.status_int)
            data = jsonutils.loads(resp.body)
            expected = copy.deepcopy(v2_VERSION_RESPONSE)
            self._paste_in_port(expected['version'], 'http://localhost/v2.0/')
            # (expected, actual) — matches the convention used by every
            # other assertion in this class; the original had the
            # arguments inverted here.
            self.assertEqual(expected, data)

    def test_public_version_v3(self):
        client = TestClient(self.public_app)
        resp = client.get('/v3/')
        self.assertEqual(http_client.OK, resp.status_int)
        data = jsonutils.loads(resp.body)
        expected = copy.deepcopy(v3_VERSION_RESPONSE)
        self._paste_in_port(expected['version'],
                            'http://localhost:%s/v3/' %
                            CONF.eventlet_server.public_port)
        self.assertEqual(expected, data)

    @utils.wip('waiting on bug #1381961')
    def test_admin_version_v3(self):
        client = TestClient(self.admin_app)
        resp = client.get('/v3/')
        self.assertEqual(http_client.OK, resp.status_int)
        data = jsonutils.loads(resp.body)
        expected = copy.deepcopy(v3_VERSION_RESPONSE)
        self._paste_in_port(expected['version'],
                            'http://localhost:%s/v3/' %
                            CONF.eventlet_server.admin_port)
        self.assertEqual(expected, data)

    def test_use_site_url_if_endpoint_unset_v3(self):
        self.config_fixture.config(public_endpoint=None, admin_endpoint=None)
        for app in (self.public_app, self.admin_app):
            client = TestClient(app)
            resp = client.get('/v3/')
            self.assertEqual(http_client.OK, resp.status_int)
            data = jsonutils.loads(resp.body)
            expected = copy.deepcopy(v3_VERSION_RESPONSE)
            self._paste_in_port(expected['version'], 'http://localhost/v3/')
            self.assertEqual(expected, data)

    @mock.patch.object(controllers, '_VERSIONS', ['v3'])
    def test_v2_disabled(self):
        client = TestClient(self.public_app)
        # request to /v2.0 should fail
        resp = client.get('/v2.0/')
        self.assertEqual(http_client.NOT_FOUND, resp.status_int)
        # request to /v3 should pass
        resp = client.get('/v3/')
        self.assertEqual(http_client.OK, resp.status_int)
        data = jsonutils.loads(resp.body)
        expected = copy.deepcopy(v3_VERSION_RESPONSE)
        self._paste_in_port(expected['version'],
                            'http://localhost:%s/v3/' %
                            CONF.eventlet_server.public_port)
        self.assertEqual(expected, data)
        # only v3 information should be displayed by requests to /
        # (copy so the module-level v3_EXPECTED_RESPONSE is not mutated)
        v3_only_response = {
            "versions": {
                "values": [
                    copy.deepcopy(v3_EXPECTED_RESPONSE)
                ]
            }
        }
        self._paste_in_port(v3_only_response['versions']['values'][0],
                            'http://localhost:%s/v3/' %
                            CONF.eventlet_server.public_port)
        resp = client.get('/')
        self.assertEqual(300, resp.status_int)
        data = jsonutils.loads(resp.body)
        self.assertEqual(v3_only_response, data)

    @mock.patch.object(controllers, '_VERSIONS', ['v2.0'])
    def test_v3_disabled(self):
        client = TestClient(self.public_app)
        # request to /v3 should fail
        resp = client.get('/v3/')
        self.assertEqual(http_client.NOT_FOUND, resp.status_int)
        # request to /v2.0 should pass
        resp = client.get('/v2.0/')
        self.assertEqual(http_client.OK, resp.status_int)
        data = jsonutils.loads(resp.body)
        expected = copy.deepcopy(v2_VERSION_RESPONSE)
        self._paste_in_port(expected['version'],
                            'http://localhost:%s/v2.0/' %
                            CONF.eventlet_server.public_port)
        self.assertEqual(expected, data)
        # only v2 information should be displayed by requests to /
        # (copy so the module-level v2_EXPECTED_RESPONSE is not mutated)
        v2_only_response = {
            "versions": {
                "values": [
                    copy.deepcopy(v2_EXPECTED_RESPONSE)
                ]
            }
        }
        self._paste_in_port(v2_only_response['versions']['values'][0],
                            'http://localhost:%s/v2.0/' %
                            CONF.eventlet_server.public_port)
        resp = client.get('/')
        self.assertEqual(300, resp.status_int)
        data = jsonutils.loads(resp.body)
        self.assertEqual(v2_only_response, data)

    def _test_json_home(self, path, exp_json_home_data):
        client = TestClient(self.public_app)
        resp = client.get(path, headers={'Accept': 'application/json-home'})
        self.assertThat(resp.status, tt_matchers.Equals('200 OK'))
        self.assertThat(resp.headers['Content-Type'],
                        tt_matchers.Equals('application/json-home'))
        self.assertThat(jsonutils.loads(resp.body),
                        tt_matchers.Equals(exp_json_home_data))

    def test_json_home_v3(self):
        # If the request is /v3 and the Accept header is application/json-home
        # then the server responds with a JSON Home document.
        exp_json_home_data = {
            'resources': V3_JSON_HOME_RESOURCES}
        self._test_json_home('/v3', exp_json_home_data)

    def test_json_home_root(self):
        # If the request is / and the Accept header is application/json-home
        # then the server responds with a JSON Home document.
        exp_json_home_data = copy.deepcopy({
            'resources': V3_JSON_HOME_RESOURCES})
        json_home.translate_urls(exp_json_home_data, '/v3')
        self._test_json_home('/', exp_json_home_data)

    def test_accept_type_handling(self):
        # Accept headers with multiple types and qvalues are handled.
        def make_request(accept_types=None):
            client = TestClient(self.public_app)
            headers = None
            if accept_types:
                headers = {'Accept': accept_types}
            resp = client.get('/v3', headers=headers)
            self.assertThat(resp.status, tt_matchers.Equals('200 OK'))
            return resp.headers['Content-Type']

        JSON = controllers.MimeTypes.JSON
        JSON_HOME = controllers.MimeTypes.JSON_HOME

        JSON_MATCHER = tt_matchers.Equals(JSON)
        JSON_HOME_MATCHER = tt_matchers.Equals(JSON_HOME)

        # Default is JSON.
        self.assertThat(make_request(), JSON_MATCHER)

        # Can request JSON and get JSON.
        self.assertThat(make_request(JSON), JSON_MATCHER)

        # Can request JSONHome and get JSONHome.
        self.assertThat(make_request(JSON_HOME), JSON_HOME_MATCHER)

        # If request JSON, JSON Home get JSON.
        accept_types = '%s, %s' % (JSON, JSON_HOME)
        self.assertThat(make_request(accept_types), JSON_MATCHER)

        # If request JSON Home, JSON get JSON.
        accept_types = '%s, %s' % (JSON_HOME, JSON)
        self.assertThat(make_request(accept_types), JSON_MATCHER)

        # If request JSON Home, JSON;q=0.5 get JSON Home.
        accept_types = '%s, %s;q=0.5' % (JSON_HOME, JSON)
        self.assertThat(make_request(accept_types), JSON_HOME_MATCHER)

        # If request some unknown mime-type, get JSON.
        self.assertThat(make_request(self.getUniqueString()), JSON_MATCHER)

    @mock.patch.object(controllers, '_VERSIONS', [])
    def test_no_json_home_document_returned_when_v3_disabled(self):
        json_home_document = controllers.request_v3_json_home('some_prefix')
        expected_document = {'resources': {}}
        self.assertEqual(expected_document, json_home_document)

    def test_extension_property_method_returns_none(self):
        extension_obj = controllers.Extensions()
        extensions_property = extension_obj.extensions
        self.assertIsNone(extensions_property)


class VersionSingleAppTestCase(unit.TestCase):
    """Tests running with a single application loaded.

    These are important because when Keystone is running in Apache httpd
    there's only one application loaded for each instance.
    """

    def setUp(self):
        super(VersionSingleAppTestCase, self).setUp()
        self.load_backends()
        self.config_fixture.config(
            public_endpoint='http://localhost:%(public_port)d',
            admin_endpoint='http://localhost:%(admin_port)d')

    def config_overrides(self):
        super(VersionSingleAppTestCase, self).config_overrides()
        admin_port = random.randint(10000, 30000)
        public_port = random.randint(40000, 60000)
        self.config_fixture.config(group='eventlet_server',
                                   public_port=public_port,
                                   admin_port=admin_port)

    def _paste_in_port(self, response, port):
        # Rewrite the 'self' link of a version entry to the given URL.
        for link in response['links']:
            if link['rel'] == 'self':
                link['href'] = port

    def _test_version(self, app_name):
        def app_port():
            if app_name == 'admin':
                return CONF.eventlet_server.admin_port
            else:
                return CONF.eventlet_server.public_port
        app = self.loadapp('keystone', app_name)
        client = TestClient(app)
        resp = client.get('/')
        self.assertEqual(300, resp.status_int)
        data = jsonutils.loads(resp.body)
        # Copy the shared template before mutating it in place.
        expected = copy.deepcopy(VERSIONS_RESPONSE)
        for version in expected['versions']['values']:
            if version['id'].startswith('v3'):
                self._paste_in_port(
                    version, 'http://localhost:%s/v3/' % app_port())
            elif version['id'] == 'v2.0':
                self._paste_in_port(
                    version, 'http://localhost:%s/v2.0/' % app_port())
        self.assertThat(data, _VersionsEqual(expected))

    def test_public(self):
        self._test_version('main')

    def test_admin(self):
        self._test_version('admin')
""" def setUp(self): super(VersionSingleAppTestCase, self).setUp() self.load_backends() self.config_fixture.config( public_endpoint='http://localhost:%(public_port)d', admin_endpoint='http://localhost:%(admin_port)d') def config_overrides(self): super(VersionSingleAppTestCase, self).config_overrides() admin_port = random.randint(10000, 30000) public_port = random.randint(40000, 60000) self.config_fixture.config(group='eventlet_server', public_port=public_port, admin_port=admin_port) def _paste_in_port(self, response, port): for link in response['links']: if link['rel'] == 'self': link['href'] = port def _test_version(self, app_name): def app_port(): if app_name == 'admin': return CONF.eventlet_server.admin_port else: return CONF.eventlet_server.public_port app = self.loadapp('keystone', app_name) client = TestClient(app) resp = client.get('/') self.assertEqual(300, resp.status_int) data = jsonutils.loads(resp.body) expected = VERSIONS_RESPONSE for version in expected['versions']['values']: if version['id'].startswith('v3'): self._paste_in_port( version, 'http://localhost:%s/v3/' % app_port()) elif version['id'] == 'v2.0': self._paste_in_port( version, 'http://localhost:%s/v2.0/' % app_port()) self.assertThat(data, _VersionsEqual(expected)) def test_public(self): self._test_version('main') def test_admin(self): self._test_version('admin') class VersionBehindSslTestCase(unit.TestCase): def setUp(self): super(VersionBehindSslTestCase, self).setUp() self.load_backends() self.public_app = self.loadapp('keystone', 'main') def config_overrides(self): super(VersionBehindSslTestCase, self).config_overrides() self.config_fixture.config( secure_proxy_ssl_header='HTTP_X_FORWARDED_PROTO') def _paste_in_port(self, response, port): for link in response['links']: if link['rel'] == 'self': link['href'] = port def _get_expected(self, host): expected = VERSIONS_RESPONSE for version in expected['versions']['values']: if version['id'].startswith('v3'): self._paste_in_port(version, host 
+ 'v3/') elif version['id'] == 'v2.0': self._paste_in_port(version, host + 'v2.0/') return expected def test_versions_without_headers(self): client = TestClient(self.public_app) host_name = 'host-%d' % random.randint(10, 30) host_port = random.randint(10000, 30000) host = 'http://%s:%s/' % (host_name, host_port) resp = client.get(host) self.assertEqual(300, resp.status_int) data = jsonutils.loads(resp.body) expected = self._get_expected(host) self.assertThat(data, _VersionsEqual(expected)) def test_versions_with_header(self): client = TestClient(self.public_app) host_name = 'host-%d' % random.randint(10, 30) host_port = random.randint(10000, 30000) resp = client.get('http://%s:%s/' % (host_name, host_port), headers={'X-Forwarded-Proto': 'https'}) self.assertEqual(300, resp.status_int) data = jsonutils.loads(resp.body) expected = self._get_expected('https://%s:%s/' % (host_name, host_port)) self.assertThat(data, _VersionsEqual(expected)) keystone-9.0.0/keystone/tests/unit/test_v3_oauth1.py0000664000567000056710000011544512701407102023732 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import uuid import mock from oslo_log import versionutils from oslo_serialization import jsonutils from pycadf import cadftaxonomy from six.moves import http_client from six.moves import urllib from keystone.contrib.oauth1 import routers from keystone import exception from keystone import oauth1 from keystone.oauth1 import controllers from keystone.oauth1 import core from keystone.tests import unit from keystone.tests.unit.common import test_notifications from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile from keystone.tests.unit import test_v3 class OAuth1ContribTests(test_v3.RestfulTestCase): @mock.patch.object(versionutils, 'report_deprecated_feature') def test_exception_happens(self, mock_deprecator): routers.OAuth1Extension(mock.ANY) mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY) args, _kwargs = mock_deprecator.call_args self.assertIn("Remove oauth1_extension from", args[1]) class OAuth1Tests(test_v3.RestfulTestCase): CONSUMER_URL = '/OS-OAUTH1/consumers' def setUp(self): super(OAuth1Tests, self).setUp() # Now that the app has been served, we can query CONF values self.base_url = 'http://localhost/v3' self.controller = controllers.OAuthControllerV3() def _create_single_consumer(self): ref = {'description': uuid.uuid4().hex} resp = self.post( self.CONSUMER_URL, body={'consumer': ref}) return resp.result['consumer'] def _create_request_token(self, consumer, project_id): endpoint = '/OS-OAUTH1/request_token' client = oauth1.Client(consumer['key'], client_secret=consumer['secret'], signature_method=oauth1.SIG_HMAC, callback_uri="oob") headers = {'requested_project_id': project_id} url, headers, body = client.sign(self.base_url + endpoint, http_method='POST', headers=headers) return endpoint, headers def _create_access_token(self, consumer, token): endpoint = '/OS-OAUTH1/access_token' client = oauth1.Client(consumer['key'], client_secret=consumer['secret'], resource_owner_key=token.key, 
resource_owner_secret=token.secret, signature_method=oauth1.SIG_HMAC, verifier=token.verifier) url, headers, body = client.sign(self.base_url + endpoint, http_method='POST') headers.update({'Content-Type': 'application/json'}) return endpoint, headers def _get_oauth_token(self, consumer, token): client = oauth1.Client(consumer['key'], client_secret=consumer['secret'], resource_owner_key=token.key, resource_owner_secret=token.secret, signature_method=oauth1.SIG_HMAC) endpoint = '/auth/tokens' url, headers, body = client.sign(self.base_url + endpoint, http_method='POST') headers.update({'Content-Type': 'application/json'}) ref = {'auth': {'identity': {'oauth1': {}, 'methods': ['oauth1']}}} return endpoint, headers, ref def _authorize_request_token(self, request_id): return '/OS-OAUTH1/authorize/%s' % (request_id) class ConsumerCRUDTests(OAuth1Tests): def _consumer_create(self, description=None, description_flag=True, **kwargs): if description_flag: ref = {'description': description} else: ref = {} if kwargs: ref.update(kwargs) resp = self.post( self.CONSUMER_URL, body={'consumer': ref}) consumer = resp.result['consumer'] consumer_id = consumer['id'] self.assertEqual(description, consumer['description']) self.assertIsNotNone(consumer_id) self.assertIsNotNone(consumer['secret']) return consumer def test_consumer_create(self): description = uuid.uuid4().hex self._consumer_create(description=description) def test_consumer_create_none_desc_1(self): self._consumer_create() def test_consumer_create_none_desc_2(self): self._consumer_create(description_flag=False) def test_consumer_create_normalize_field(self): # If create a consumer with a field with : or - in the name, # the name is normalized by converting those chars to _. 
field_name = 'some:weird-field' field_value = uuid.uuid4().hex extra_fields = {field_name: field_value} consumer = self._consumer_create(**extra_fields) normalized_field_name = 'some_weird_field' self.assertEqual(field_value, consumer[normalized_field_name]) def test_consumer_delete(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] resp = self.delete(self.CONSUMER_URL + '/%s' % consumer_id) self.assertResponseStatus(resp, http_client.NO_CONTENT) def test_consumer_get(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] resp = self.get(self.CONSUMER_URL + '/%s' % consumer_id) self_url = ['http://localhost/v3', self.CONSUMER_URL, '/', consumer_id] self_url = ''.join(self_url) self.assertEqual(self_url, resp.result['consumer']['links']['self']) self.assertEqual(consumer_id, resp.result['consumer']['id']) def test_consumer_list(self): self._consumer_create() resp = self.get(self.CONSUMER_URL) entities = resp.result['consumers'] self.assertIsNotNone(entities) self_url = ['http://localhost/v3', self.CONSUMER_URL] self_url = ''.join(self_url) self.assertEqual(self_url, resp.result['links']['self']) self.assertValidListLinks(resp.result['links']) def test_consumer_update(self): consumer = self._create_single_consumer() original_id = consumer['id'] original_description = consumer['description'] update_description = original_description + '_new' update_ref = {'description': update_description} update_resp = self.patch(self.CONSUMER_URL + '/%s' % original_id, body={'consumer': update_ref}) consumer = update_resp.result['consumer'] self.assertEqual(update_description, consumer['description']) self.assertEqual(original_id, consumer['id']) def test_consumer_update_bad_secret(self): consumer = self._create_single_consumer() original_id = consumer['id'] update_ref = copy.deepcopy(consumer) update_ref['description'] = uuid.uuid4().hex update_ref['secret'] = uuid.uuid4().hex self.patch(self.CONSUMER_URL + '/%s' % original_id, 
body={'consumer': update_ref}, expected_status=http_client.BAD_REQUEST) def test_consumer_update_bad_id(self): consumer = self._create_single_consumer() original_id = consumer['id'] original_description = consumer['description'] update_description = original_description + "_new" update_ref = copy.deepcopy(consumer) update_ref['description'] = update_description update_ref['id'] = update_description self.patch(self.CONSUMER_URL + '/%s' % original_id, body={'consumer': update_ref}, expected_status=http_client.BAD_REQUEST) def test_consumer_update_normalize_field(self): # If update a consumer with a field with : or - in the name, # the name is normalized by converting those chars to _. field1_name = 'some:weird-field' field1_orig_value = uuid.uuid4().hex extra_fields = {field1_name: field1_orig_value} consumer = self._consumer_create(**extra_fields) consumer_id = consumer['id'] field1_new_value = uuid.uuid4().hex field2_name = 'weird:some-field' field2_value = uuid.uuid4().hex update_ref = {field1_name: field1_new_value, field2_name: field2_value} update_resp = self.patch(self.CONSUMER_URL + '/%s' % consumer_id, body={'consumer': update_ref}) consumer = update_resp.result['consumer'] normalized_field1_name = 'some_weird_field' self.assertEqual(field1_new_value, consumer[normalized_field1_name]) normalized_field2_name = 'weird_some_field' self.assertEqual(field2_value, consumer[normalized_field2_name]) def test_consumer_create_no_description(self): resp = self.post(self.CONSUMER_URL, body={'consumer': {}}) consumer = resp.result['consumer'] consumer_id = consumer['id'] self.assertIsNone(consumer['description']) self.assertIsNotNone(consumer_id) self.assertIsNotNone(consumer['secret']) def test_consumer_get_bad_id(self): self.get(self.CONSUMER_URL + '/%(consumer_id)s' % {'consumer_id': uuid.uuid4().hex}, expected_status=http_client.NOT_FOUND) class OAuthFlowTests(OAuth1Tests): def test_oauth_flow(self): consumer = self._create_single_consumer() consumer_id = 
consumer['id'] consumer_secret = consumer['secret'] self.consumer = {'key': consumer_id, 'secret': consumer_secret} self.assertIsNotNone(self.consumer['secret']) url, headers = self._create_request_token(self.consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-urlformencoded') credentials = urllib.parse.parse_qs(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] self.request_token = oauth1.Token(request_key, request_secret) self.assertIsNotNone(self.request_token.key) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} resp = self.put(url, body=body, expected_status=http_client.OK) self.verifier = resp.result['token']['oauth_verifier'] self.assertTrue(all(i in core.VERIFIER_CHARS for i in self.verifier)) self.assertEqual(8, len(self.verifier)) self.request_token.set_verifier(self.verifier) url, headers = self._create_access_token(self.consumer, self.request_token) content = self.post( url, headers=headers, response_content_type='application/x-www-urlformencoded') credentials = urllib.parse.parse_qs(content.result) access_key = credentials['oauth_token'][0] access_secret = credentials['oauth_token_secret'][0] self.access_token = oauth1.Token(access_key, access_secret) self.assertIsNotNone(self.access_token.key) url, headers, body = self._get_oauth_token(self.consumer, self.access_token) content = self.post(url, headers=headers, body=body) self.keystone_token_id = content.headers['X-Subject-Token'] self.keystone_token = content.result['token'] self.assertIsNotNone(self.keystone_token_id) class AccessTokenCRUDTests(OAuthFlowTests): def test_delete_access_token_dne(self): self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s' % {'user': self.user_id, 'auth': uuid.uuid4().hex}, expected_status=http_client.NOT_FOUND) def test_list_no_access_tokens(self): resp = 
self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens' % {'user_id': self.user_id}) entities = resp.result['access_tokens'] self.assertEqual([], entities) self.assertValidListLinks(resp.result['links']) def test_get_single_access_token(self): self.test_oauth_flow() url = '/users/%(user_id)s/OS-OAUTH1/access_tokens/%(key)s' % { 'user_id': self.user_id, 'key': self.access_token.key } resp = self.get(url) entity = resp.result['access_token'] self.assertEqual(self.access_token.key, entity['id']) self.assertEqual(self.consumer['key'], entity['consumer_id']) self.assertEqual('http://localhost/v3' + url, entity['links']['self']) def test_get_access_token_dne(self): self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens/%(key)s' % {'user_id': self.user_id, 'key': uuid.uuid4().hex}, expected_status=http_client.NOT_FOUND) def test_list_all_roles_in_access_token(self): self.test_oauth_flow() resp = self.get('/users/%(id)s/OS-OAUTH1/access_tokens/%(key)s/roles' % {'id': self.user_id, 'key': self.access_token.key}) entities = resp.result['roles'] self.assertTrue(entities) self.assertValidListLinks(resp.result['links']) def test_get_role_in_access_token(self): self.test_oauth_flow() url = ('/users/%(id)s/OS-OAUTH1/access_tokens/%(key)s/roles/%(role)s' % {'id': self.user_id, 'key': self.access_token.key, 'role': self.role_id}) resp = self.get(url) entity = resp.result['role'] self.assertEqual(self.role_id, entity['id']) def test_get_role_in_access_token_dne(self): self.test_oauth_flow() url = ('/users/%(id)s/OS-OAUTH1/access_tokens/%(key)s/roles/%(role)s' % {'id': self.user_id, 'key': self.access_token.key, 'role': uuid.uuid4().hex}) self.get(url, expected_status=http_client.NOT_FOUND) def test_list_and_delete_access_tokens(self): self.test_oauth_flow() # List access_tokens should be > 0 resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens' % {'user_id': self.user_id}) entities = resp.result['access_tokens'] self.assertTrue(entities) 
self.assertValidListLinks(resp.result['links']) # Delete access_token resp = self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s' % {'user': self.user_id, 'auth': self.access_token.key}) self.assertResponseStatus(resp, http_client.NO_CONTENT) # List access_token should be 0 resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens' % {'user_id': self.user_id}) entities = resp.result['access_tokens'] self.assertEqual([], entities) self.assertValidListLinks(resp.result['links']) class AuthTokenTests(OAuthFlowTests): def test_keystone_token_is_valid(self): self.test_oauth_flow() headers = {'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id} r = self.get('/auth/tokens', headers=headers) self.assertValidTokenResponse(r, self.user) # now verify the oauth section oauth_section = r.result['token']['OS-OAUTH1'] self.assertEqual(self.access_token.key, oauth_section['access_token_id']) self.assertEqual(self.consumer['key'], oauth_section['consumer_id']) # verify the roles section roles_list = r.result['token']['roles'] # we can just verify the 0th role since we are only assigning one role self.assertEqual(self.role_id, roles_list[0]['id']) # verify that the token can perform delegated tasks ref = unit.new_user_ref(domain_id=self.domain_id) r = self.admin_request(path='/v3/users', headers=headers, method='POST', body={'user': ref}) self.assertValidUserResponse(r, ref) def test_delete_access_token_also_revokes_token(self): self.test_oauth_flow() # Delete access token resp = self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s' % {'user': self.user_id, 'auth': self.access_token.key}) self.assertResponseStatus(resp, http_client.NO_CONTENT) # Check Keystone Token no longer exists headers = {'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id} self.get('/auth/tokens', headers=headers, expected_status=http_client.NOT_FOUND) def test_deleting_consumer_also_deletes_tokens(self): self.test_oauth_flow() # 
Delete consumer consumer_id = self.consumer['key'] resp = self.delete('/OS-OAUTH1/consumers/%(consumer_id)s' % {'consumer_id': consumer_id}) self.assertResponseStatus(resp, http_client.NO_CONTENT) # List access_token should be 0 resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens' % {'user_id': self.user_id}) entities = resp.result['access_tokens'] self.assertEqual([], entities) # Check Keystone Token no longer exists headers = {'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id} self.head('/auth/tokens', headers=headers, expected_status=http_client.NOT_FOUND) def test_change_user_password_also_deletes_tokens(self): self.test_oauth_flow() # delegated keystone token exists headers = {'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id} r = self.get('/auth/tokens', headers=headers) self.assertValidTokenResponse(r, self.user) user = {'password': uuid.uuid4().hex} r = self.patch('/users/%(user_id)s' % { 'user_id': self.user['id']}, body={'user': user}) headers = {'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id} self.admin_request(path='/auth/tokens', headers=headers, method='GET', expected_status=http_client.NOT_FOUND) def test_deleting_project_also_invalidates_tokens(self): self.test_oauth_flow() # delegated keystone token exists headers = {'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id} r = self.get('/auth/tokens', headers=headers) self.assertValidTokenResponse(r, self.user) r = self.delete('/projects/%(project_id)s' % { 'project_id': self.project_id}) headers = {'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id} self.admin_request(path='/auth/tokens', headers=headers, method='GET', expected_status=http_client.NOT_FOUND) def test_token_chaining_is_not_allowed(self): self.test_oauth_flow() # attempt to re-authenticate (token chain) with the given token path = '/v3/auth/tokens/' auth_data = 
self.build_authentication_request( token=self.keystone_token_id) self.admin_request( path=path, body=auth_data, token=self.keystone_token_id, method='POST', expected_status=http_client.FORBIDDEN) def test_delete_keystone_tokens_by_consumer_id(self): self.test_oauth_flow() self.token_provider_api._persistence.get_token(self.keystone_token_id) self.token_provider_api._persistence.delete_tokens( self.user_id, consumer_id=self.consumer['key']) self.assertRaises(exception.TokenNotFound, self.token_provider_api._persistence.get_token, self.keystone_token_id) def _create_trust_get_token(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.user_id, project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id]) del ref['id'] r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], trust_id=trust['id']) return self.get_requested_token(auth_data) def _approve_request_token_url(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] self.consumer = {'key': consumer_id, 'secret': consumer_secret} self.assertIsNotNone(self.consumer['secret']) url, headers = self._create_request_token(self.consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-urlformencoded') credentials = urllib.parse.parse_qs(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] self.request_token = oauth1.Token(request_key, request_secret) self.assertIsNotNone(self.request_token.key) url = self._authorize_request_token(request_key) return url def test_oauth_token_cannot_create_new_trust(self): self.test_oauth_flow() ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.user_id, project_id=self.project_id, 
impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id]) del ref['id'] self.post('/OS-TRUST/trusts', body={'trust': ref}, token=self.keystone_token_id, expected_status=http_client.FORBIDDEN) def test_oauth_token_cannot_authorize_request_token(self): self.test_oauth_flow() url = self._approve_request_token_url() body = {'roles': [{'id': self.role_id}]} self.put(url, body=body, token=self.keystone_token_id, expected_status=http_client.FORBIDDEN) def test_oauth_token_cannot_list_request_tokens(self): self._set_policy({"identity:list_access_tokens": [], "identity:create_consumer": [], "identity:authorize_request_token": []}) self.test_oauth_flow() url = '/users/%s/OS-OAUTH1/access_tokens' % self.user_id self.get(url, token=self.keystone_token_id, expected_status=http_client.FORBIDDEN) def _set_policy(self, new_policy): self.tempfile = self.useFixture(temporaryfile.SecureTempFile()) self.tmpfilename = self.tempfile.file_name self.config_fixture.config(group='oslo_policy', policy_file=self.tmpfilename) with open(self.tmpfilename, "w") as policyfile: policyfile.write(jsonutils.dumps(new_policy)) def test_trust_token_cannot_authorize_request_token(self): trust_token = self._create_trust_get_token() url = self._approve_request_token_url() body = {'roles': [{'id': self.role_id}]} self.put(url, body=body, token=trust_token, expected_status=http_client.FORBIDDEN) def test_trust_token_cannot_list_request_tokens(self): self._set_policy({"identity:list_access_tokens": [], "identity:create_trust": []}) trust_token = self._create_trust_get_token() url = '/users/%s/OS-OAUTH1/access_tokens' % self.user_id self.get(url, token=trust_token, expected_status=http_client.FORBIDDEN) class FernetAuthTokenTests(AuthTokenTests): def config_overrides(self): super(FernetAuthTokenTests, self).config_overrides() self.config_fixture.config(group='token', provider='fernet') self.useFixture(ksfixtures.KeyRepository(self.config_fixture)) def 
test_delete_keystone_tokens_by_consumer_id(self): # NOTE(lbragstad): Fernet tokens are never persisted in the backend. pass class MaliciousOAuth1Tests(OAuth1Tests): def test_bad_consumer_secret(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer = {'key': consumer_id, 'secret': uuid.uuid4().hex} url, headers = self._create_request_token(consumer, self.project_id) self.post(url, headers=headers, expected_status=http_client.UNAUTHORIZED) def test_bad_request_token_key(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) self.post( url, headers=headers, response_content_type='application/x-www-urlformencoded') url = self._authorize_request_token(uuid.uuid4().hex) body = {'roles': [{'id': self.role_id}]} self.put(url, body=body, expected_status=http_client.NOT_FOUND) def test_bad_consumer_id(self): consumer = self._create_single_consumer() consumer_id = uuid.uuid4().hex consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) self.post(url, headers=headers, expected_status=http_client.NOT_FOUND) def test_bad_requested_project_id(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} project_id = uuid.uuid4().hex url, headers = self._create_request_token(consumer, project_id) self.post(url, headers=headers, expected_status=http_client.NOT_FOUND) def test_bad_verifier(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) content = self.post( 
url, headers=headers, response_content_type='application/x-www-urlformencoded') credentials = urllib.parse.parse_qs(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] request_token = oauth1.Token(request_key, request_secret) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} resp = self.put(url, body=body, expected_status=http_client.OK) verifier = resp.result['token']['oauth_verifier'] self.assertIsNotNone(verifier) request_token.set_verifier(uuid.uuid4().hex) url, headers = self._create_access_token(consumer, request_token) self.post(url, headers=headers, expected_status=http_client.UNAUTHORIZED) def test_bad_authorizing_roles(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-urlformencoded') credentials = urllib.parse.parse_qs(content.result) request_key = credentials['oauth_token'][0] self.assignment_api.remove_role_from_user_and_project( self.user_id, self.project_id, self.role_id) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} self.admin_request(path=url, method='PUT', body=body, expected_status=http_client.NOT_FOUND) def test_expired_authorizing_request_token(self): self.config_fixture.config(group='oauth1', request_token_duration=-1) consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] self.consumer = {'key': consumer_id, 'secret': consumer_secret} self.assertIsNotNone(self.consumer['key']) url, headers = self._create_request_token(self.consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-urlformencoded') credentials = 
urllib.parse.parse_qs(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] self.request_token = oauth1.Token(request_key, request_secret) self.assertIsNotNone(self.request_token.key) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} self.put(url, body=body, expected_status=http_client.UNAUTHORIZED) def test_expired_creating_keystone_token(self): self.config_fixture.config(group='oauth1', access_token_duration=-1) consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] self.consumer = {'key': consumer_id, 'secret': consumer_secret} self.assertIsNotNone(self.consumer['key']) url, headers = self._create_request_token(self.consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-urlformencoded') credentials = urllib.parse.parse_qs(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] self.request_token = oauth1.Token(request_key, request_secret) self.assertIsNotNone(self.request_token.key) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} resp = self.put(url, body=body, expected_status=http_client.OK) self.verifier = resp.result['token']['oauth_verifier'] self.request_token.set_verifier(self.verifier) url, headers = self._create_access_token(self.consumer, self.request_token) content = self.post( url, headers=headers, response_content_type='application/x-www-urlformencoded') credentials = urllib.parse.parse_qs(content.result) access_key = credentials['oauth_token'][0] access_secret = credentials['oauth_token_secret'][0] self.access_token = oauth1.Token(access_key, access_secret) self.assertIsNotNone(self.access_token.key) url, headers, body = self._get_oauth_token(self.consumer, self.access_token) self.post(url, headers=headers, body=body, 
expected_status=http_client.UNAUTHORIZED) def test_missing_oauth_headers(self): endpoint = '/OS-OAUTH1/request_token' client = oauth1.Client(uuid.uuid4().hex, client_secret=uuid.uuid4().hex, signature_method=oauth1.SIG_HMAC, callback_uri="oob") headers = {'requested_project_id': uuid.uuid4().hex} _url, headers, _body = client.sign(self.base_url + endpoint, http_method='POST', headers=headers) # NOTE(stevemar): To simulate this error, we remove the Authorization # header from the post request. del headers['Authorization'] self.post(endpoint, headers=headers, expected_status=http_client.INTERNAL_SERVER_ERROR) class OAuthNotificationTests(OAuth1Tests, test_notifications.BaseNotificationTest): def test_create_consumer(self): consumer_ref = self._create_single_consumer() self._assert_notify_sent(consumer_ref['id'], test_notifications.CREATED_OPERATION, 'OS-OAUTH1:consumer') self._assert_last_audit(consumer_ref['id'], test_notifications.CREATED_OPERATION, 'OS-OAUTH1:consumer', cadftaxonomy.SECURITY_ACCOUNT) def test_update_consumer(self): consumer_ref = self._create_single_consumer() update_ref = {'consumer': {'description': uuid.uuid4().hex}} self.oauth_api.update_consumer(consumer_ref['id'], update_ref) self._assert_notify_sent(consumer_ref['id'], test_notifications.UPDATED_OPERATION, 'OS-OAUTH1:consumer') self._assert_last_audit(consumer_ref['id'], test_notifications.UPDATED_OPERATION, 'OS-OAUTH1:consumer', cadftaxonomy.SECURITY_ACCOUNT) def test_delete_consumer(self): consumer_ref = self._create_single_consumer() self.oauth_api.delete_consumer(consumer_ref['id']) self._assert_notify_sent(consumer_ref['id'], test_notifications.DELETED_OPERATION, 'OS-OAUTH1:consumer') self._assert_last_audit(consumer_ref['id'], test_notifications.DELETED_OPERATION, 'OS-OAUTH1:consumer', cadftaxonomy.SECURITY_ACCOUNT) def test_oauth_flow_notifications(self): """Test to ensure notifications are sent for oauth tokens This test is very similar to test_oauth_flow, however there are 
additional checks in this test for ensuring that notifications for request token creation, and access token creation/deletion are emitted. """ consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] self.consumer = {'key': consumer_id, 'secret': consumer_secret} self.assertIsNotNone(self.consumer['secret']) url, headers = self._create_request_token(self.consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-urlformencoded') credentials = urllib.parse.parse_qs(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] self.request_token = oauth1.Token(request_key, request_secret) self.assertIsNotNone(self.request_token.key) # Test to ensure the create request token notification is sent self._assert_notify_sent(request_key, test_notifications.CREATED_OPERATION, 'OS-OAUTH1:request_token') self._assert_last_audit(request_key, test_notifications.CREATED_OPERATION, 'OS-OAUTH1:request_token', cadftaxonomy.SECURITY_CREDENTIAL) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} resp = self.put(url, body=body, expected_status=http_client.OK) self.verifier = resp.result['token']['oauth_verifier'] self.assertTrue(all(i in core.VERIFIER_CHARS for i in self.verifier)) self.assertEqual(8, len(self.verifier)) self.request_token.set_verifier(self.verifier) url, headers = self._create_access_token(self.consumer, self.request_token) content = self.post( url, headers=headers, response_content_type='application/x-www-urlformencoded') credentials = urllib.parse.parse_qs(content.result) access_key = credentials['oauth_token'][0] access_secret = credentials['oauth_token_secret'][0] self.access_token = oauth1.Token(access_key, access_secret) self.assertIsNotNone(self.access_token.key) # Test to ensure the create access token notification is sent self._assert_notify_sent(access_key, 
test_notifications.CREATED_OPERATION, 'OS-OAUTH1:access_token') self._assert_last_audit(access_key, test_notifications.CREATED_OPERATION, 'OS-OAUTH1:access_token', cadftaxonomy.SECURITY_CREDENTIAL) resp = self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s' % {'user': self.user_id, 'auth': self.access_token.key}) self.assertResponseStatus(resp, http_client.NO_CONTENT) # Test to ensure the delete access token notification is sent self._assert_notify_sent(access_key, test_notifications.DELETED_OPERATION, 'OS-OAUTH1:access_token') self._assert_last_audit(access_key, test_notifications.DELETED_OPERATION, 'OS-OAUTH1:access_token', cadftaxonomy.SECURITY_CREDENTIAL) class OAuthCADFNotificationTests(OAuthNotificationTests): def setUp(self): """Repeat the tests for CADF notifications.""" super(OAuthCADFNotificationTests, self).setUp() self.config_fixture.config(notification_format='cadf') class JsonHomeTests(OAuth1Tests, test_v3.JsonHomeTestMixin): JSON_HOME_DATA = { 'http://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/' 'rel/consumers': { 'href': '/OS-OAUTH1/consumers', }, } keystone-9.0.0/keystone/tests/unit/test_contrib_s3_core.py0000664000567000056710000001035312701407102025166 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from keystone.contrib import s3 from keystone import exception from keystone.tests import unit class S3ContribCore(unit.TestCase): def setUp(self): super(S3ContribCore, self).setUp() self.load_backends() self.controller = s3.S3Controller() def test_good_signature_v1(self): creds_ref = {'secret': u'b121dd41cdcc42fe9f70e572e84295aa'} credentials = {'token': 'UFVUCjFCMk0yWThBc2dUcGdBbVk3UGhDZmc9PQphcHB' 'saWNhdGlvbi9vY3RldC1zdHJlYW0KVHVlLCAxMSBEZWMgMjAxM' 'iAyMTo0MTo0MSBHTVQKL2NvbnRfczMvdXBsb2FkZWRfZnJ' 'vbV9zMy50eHQ=', 'signature': 'IL4QLcLVaYgylF9iHj6Wb8BGZsw='} self.assertIsNone(self.controller.check_signature(creds_ref, credentials)) def test_bad_signature_v1(self): creds_ref = {'secret': u'b121dd41cdcc42fe9f70e572e84295aa'} credentials = {'token': 'UFVUCjFCMk0yWThBc2dUcGdBbVk3UGhDZmc9PQphcHB' 'saWNhdGlvbi9vY3RldC1zdHJlYW0KVHVlLCAxMSBEZWMgMjAxM' 'iAyMTo0MTo0MSBHTVQKL2NvbnRfczMvdXBsb2FkZWRfZnJ' 'vbV9zMy50eHQ=', 'signature': uuid.uuid4().hex} self.assertRaises(exception.Unauthorized, self.controller.check_signature, creds_ref, credentials) def test_good_signature_v4(self): creds_ref = {'secret': u'e7a7a2240136494986991a6598d9fb9f'} credentials = {'token': 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw' 'MTUwODI0L1JlZ2lvbk9uZS9zMy9hd3M0X3JlcXVlc3QKZjIy' 'MTU1ODBlZWI5YTE2NzM1MWJkOTNlODZjM2I2ZjA0YTkyOGY1' 'YzU1MjBhMzkzNWE0NTM1NDBhMDk1NjRiNQ==', 'signature': '730ba8f58df6ffeadd78f402e990b2910d60' 'bc5c2aec63619734f096a4dd77be'} self.assertIsNone(self.controller.check_signature(creds_ref, credentials)) def test_bad_signature_v4(self): creds_ref = {'secret': u'e7a7a2240136494986991a6598d9fb9f'} credentials = {'token': 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw' 'MTUwODI0L1JlZ2lvbk9uZS9zMy9hd3M0X3JlcXVlc3QKZjIy' 'MTU1ODBlZWI5YTE2NzM1MWJkOTNlODZjM2I2ZjA0YTkyOGY1' 'YzU1MjBhMzkzNWE0NTM1NDBhMDk1NjRiNQ==', 'signature': uuid.uuid4().hex} self.assertRaises(exception.Unauthorized, self.controller.check_signature, creds_ref, credentials) def 
test_bad_token_v4(self): creds_ref = {'secret': u'e7a7a2240136494986991a6598d9fb9f'} # token has invalid format of first part credentials = {'token': 'QVdTNC1BQUEKWApYClg=', 'signature': ''} self.assertRaises(exception.Unauthorized, self.controller.check_signature, creds_ref, credentials) # token has invalid format of scope credentials = {'token': 'QVdTNC1ITUFDLVNIQTI1NgpYCi8vczMvYXdzTl9yZXF1ZXN0Clg=', 'signature': ''} self.assertRaises(exception.Unauthorized, self.controller.check_signature, creds_ref, credentials) keystone-9.0.0/keystone/tests/unit/test_validation.py0000664000567000056710000030664412701407105024261 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import six import testtools from keystone.assignment import schema as assignment_schema from keystone.catalog import schema as catalog_schema from keystone.common import validation from keystone.common.validation import parameter_types from keystone.common.validation import validators from keystone.credential import schema as credential_schema from keystone import exception from keystone.federation import schema as federation_schema from keystone.identity import schema as identity_schema from keystone.oauth1 import schema as oauth1_schema from keystone.policy import schema as policy_schema from keystone.resource import schema as resource_schema from keystone.tests import unit from keystone.trust import schema as trust_schema """Example model to validate create requests against. Assume that this is the only backend for the create and validate schemas. This is just an example to show how a backend can be used to construct a schema. In Keystone, schemas are built according to the Identity API and the backends available in Keystone. This example does not mean that all schema in Keystone were strictly based on the SQL backends. 
class Entity(sql.ModelBase): __tablename__ = 'entity' attributes = ['id', 'name', 'domain_id', 'description'] id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(255), nullable=False) description = sql.Column(sql.Text(), nullable=True) enabled = sql.Column(sql.Boolean, default=True, nullable=False) url = sql.Column(sql.String(225), nullable=True) email = sql.Column(sql.String(64), nullable=True) """ # Test schema to validate create requests against _entity_properties = { 'name': parameter_types.name, 'description': validation.nullable(parameter_types.description), 'enabled': parameter_types.boolean, 'url': validation.nullable(parameter_types.url), 'email': validation.nullable(parameter_types.email), 'id_string': validation.nullable(parameter_types.id_string) } entity_create = { 'type': 'object', 'properties': _entity_properties, 'required': ['name'], 'additionalProperties': True, } entity_create_optional_body = { 'type': 'object', 'properties': _entity_properties, 'additionalProperties': True, } entity_update = { 'type': 'object', 'properties': _entity_properties, 'minProperties': 1, 'additionalProperties': True, } _VALID_ENABLED_FORMATS = [True, False] _INVALID_ENABLED_FORMATS = ['some string', 1, 0, 'True', 'False'] _INVALID_DESC_FORMATS = [False, 1, 2.0] _VALID_URLS = ['https://example.com', 'http://EXAMPLE.com/v3', 'http://localhost', 'http://127.0.0.1:5000', 'http://1.1.1.1', 'http://255.255.255.255', 'http://[::1]', 'http://[::1]:35357', 'http://[1::8]', 'http://[fe80::8%25eth0]', 'http://[::1.2.3.4]', 'http://[2001:DB8::1.2.3.4]', 'http://[::a:1.2.3.4]', 'http://[a::b:1.2.3.4]', 'http://[1:2:3:4:5:6:7:8]', 'http://[1:2:3:4:5:6:1.2.3.4]', 'http://[abcd:efAB:CDEF:1111:9999::]'] _INVALID_URLS = [False, 'this is not a URL', 1234, 'www.example.com', 'localhost', 'http//something.com', 'https//something.com', ' http://example.com'] _VALID_FILTERS = [{'interface': 'admin'}, {'region': 'US-WEST', 'interface': 'internal'}] _INVALID_FILTERS 
= ['some string', 1, 0, True, False] def expected_validation_failure(msg): def wrapper(f): def wrapped(self, *args, **kwargs): args = (self,) + args e = self.assertRaises(exception.ValidationError, f, *args, **kwargs) self.assertIn(msg, six.text_type(e)) return wrapped return wrapper class ValidatedDecoratorTests(unit.BaseTestCase): entity_schema = { 'type': 'object', 'properties': { 'name': parameter_types.name, }, 'required': ['name'], } valid_entity = { 'name': uuid.uuid4().hex, } invalid_entity = { 'name': 1.0, # NOTE(dstanek): this is the incorrect type for name } @validation.validated(entity_create, 'entity') def create_entity(self, entity): """Used to test cases where validated param is the only param.""" @validation.validated(entity_create_optional_body, 'entity') def create_entity_optional_body(self, entity): """Used to test cases where there is an optional body.""" @validation.validated(entity_update, 'entity') def update_entity(self, entity_id, entity): """Used to test cases where validated param is not the only param.""" def test_calling_create_with_valid_entity_kwarg_succeeds(self): self.create_entity(entity=self.valid_entity) def test_calling_create_with_empty_entity_kwarg_succeeds(self): """Test the case when client passing in an empty kwarg reference.""" self.create_entity_optional_body(entity={}) @expected_validation_failure('Expecting to find entity in request body') def test_calling_create_with_kwarg_as_None_fails(self): self.create_entity(entity=None) def test_calling_create_with_valid_entity_arg_succeeds(self): self.create_entity(self.valid_entity) def test_calling_create_with_empty_entity_arg_succeeds(self): """Test the case when client passing in an empty entity reference.""" self.create_entity_optional_body({}) @expected_validation_failure("Invalid input for field 'name'") def test_calling_create_with_invalid_entity_fails(self): self.create_entity(self.invalid_entity) @expected_validation_failure('Expecting to find entity in request body') 
def test_calling_create_with_entity_arg_as_None_fails(self): self.create_entity(None) @expected_validation_failure('Expecting to find entity in request body') def test_calling_create_without_an_entity_fails(self): self.create_entity() def test_using_the_wrong_name_with_the_decorator_fails(self): with testtools.ExpectedException(TypeError): @validation.validated(self.entity_schema, 'entity_') def function(entity): pass # NOTE(dstanek): below are the test cases for making sure the validation # works when the validated param is not the only param. Since all of the # actual validation cases are tested above these test are for a sanity # check. def test_calling_update_with_valid_entity_succeeds(self): self.update_entity(uuid.uuid4().hex, self.valid_entity) @expected_validation_failure("Invalid input for field 'name'") def test_calling_update_with_invalid_entity_fails(self): self.update_entity(uuid.uuid4().hex, self.invalid_entity) def test_calling_update_with_empty_entity_kwarg_succeeds(self): """Test the case when client passing in an empty entity reference.""" global entity_update original_entity_update = entity_update.copy() # pop 'minProperties' from schema so that empty body is allowed. 
entity_update.pop('minProperties') self.update_entity(uuid.uuid4().hex, entity={}) entity_update = original_entity_update class EntityValidationTestCase(unit.BaseTestCase): def setUp(self): super(EntityValidationTestCase, self).setUp() self.resource_name = 'some resource name' self.description = 'Some valid description' self.valid_enabled = True self.valid_url = 'http://example.com' self.valid_email = 'joe@example.com' self.create_schema_validator = validators.SchemaValidator( entity_create) self.update_schema_validator = validators.SchemaValidator( entity_update) def test_create_entity_with_all_valid_parameters_validates(self): """Validate all parameter values against test schema.""" request_to_validate = {'name': self.resource_name, 'description': self.description, 'enabled': self.valid_enabled, 'url': self.valid_url, 'email': self.valid_email} self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_only_required_valid_parameters_validates(self): """Validate correct for only parameters values against test schema.""" request_to_validate = {'name': self.resource_name} self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_name_too_long_raises_exception(self): """Validate long names. Validate that an exception is raised when validating a string of 255+ characters passed in as a name. """ invalid_name = 'a' * 256 request_to_validate = {'name': invalid_name} self.assertRaises(exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate) def test_create_entity_with_name_too_short_raises_exception(self): """Validate short names. Test that an exception is raised when passing a string of length zero as a name parameter. 
""" request_to_validate = {'name': ''} self.assertRaises(exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate) def test_create_entity_with_unicode_name_validates(self): """Test that we successfully validate a unicode string.""" request_to_validate = {'name': u'αβγδ'} self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_invalid_enabled_format_raises_exception(self): """Validate invalid enabled formats. Test that an exception is raised when passing invalid boolean-like values as `enabled`. """ for format in _INVALID_ENABLED_FORMATS: request_to_validate = {'name': self.resource_name, 'enabled': format} self.assertRaises(exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate) def test_create_entity_with_valid_enabled_formats_validates(self): """Validate valid enabled formats. Test that we have successful validation on boolean values for `enabled`. """ for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = {'name': self.resource_name, 'enabled': valid_enabled} # Make sure validation doesn't raise a validation exception self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_valid_urls_validates(self): """Test that proper urls are successfully validated.""" for valid_url in _VALID_URLS: request_to_validate = {'name': self.resource_name, 'url': valid_url} self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_invalid_urls_fails(self): """Test that an exception is raised when validating improper urls.""" for invalid_url in _INVALID_URLS: request_to_validate = {'name': self.resource_name, 'url': invalid_url} self.assertRaises(exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate) def test_create_entity_with_valid_email_validates(self): """Validate email address Test that we successfully validate properly formatted email addresses. 
""" request_to_validate = {'name': self.resource_name, 'email': self.valid_email} self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_invalid_email_fails(self): """Validate invalid email address. Test that an exception is raised when validating improperly formatted email addresses. """ request_to_validate = {'name': self.resource_name, 'email': 'some invalid email value'} self.assertRaises(exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate) def test_create_entity_with_valid_id_strings(self): """Validate acceptable id strings.""" valid_id_strings = [str(uuid.uuid4()), uuid.uuid4().hex, 'default'] for valid_id in valid_id_strings: request_to_validate = {'name': self.resource_name, 'id_string': valid_id} self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_invalid_id_strings(self): """Exception raised when using invalid id strings.""" long_string = 'A' * 65 invalid_id_strings = ['', long_string] for invalid_id in invalid_id_strings: request_to_validate = {'name': self.resource_name, 'id_string': invalid_id} self.assertRaises(exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate) def test_create_entity_with_null_id_string(self): """Validate that None is an acceptable optional string type.""" request_to_validate = {'name': self.resource_name, 'id_string': None} self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_null_string_succeeds(self): """Exception raised when passing None on required id strings.""" request_to_validate = {'name': self.resource_name, 'id_string': None} self.create_schema_validator.validate(request_to_validate) def test_update_entity_with_no_parameters_fails(self): """At least one parameter needs to be present for an update.""" request_to_validate = {} self.assertRaises(exception.SchemaValidationError, self.update_schema_validator.validate, request_to_validate) def 
test_update_entity_with_all_parameters_valid_validates(self): """Simulate updating an entity by ID.""" request_to_validate = {'name': self.resource_name, 'description': self.description, 'enabled': self.valid_enabled, 'url': self.valid_url, 'email': self.valid_email} self.update_schema_validator.validate(request_to_validate) def test_update_entity_with_a_valid_required_parameter_validates(self): """Succeed if a valid required parameter is provided.""" request_to_validate = {'name': self.resource_name} self.update_schema_validator.validate(request_to_validate) def test_update_entity_with_invalid_required_parameter_fails(self): """Fail if a provided required parameter is invalid.""" request_to_validate = {'name': 'a' * 256} self.assertRaises(exception.SchemaValidationError, self.update_schema_validator.validate, request_to_validate) def test_update_entity_with_a_null_optional_parameter_validates(self): """Optional parameters can be null to removed the value.""" request_to_validate = {'email': None} self.update_schema_validator.validate(request_to_validate) def test_update_entity_with_a_required_null_parameter_fails(self): """The `name` parameter can't be null.""" request_to_validate = {'name': None} self.assertRaises(exception.SchemaValidationError, self.update_schema_validator.validate, request_to_validate) def test_update_entity_with_a_valid_optional_parameter_validates(self): """Succeeds with only a single valid optional parameter.""" request_to_validate = {'email': self.valid_email} self.update_schema_validator.validate(request_to_validate) def test_update_entity_with_invalid_optional_parameter_fails(self): """Fails when an optional parameter is invalid.""" request_to_validate = {'email': 0} self.assertRaises(exception.SchemaValidationError, self.update_schema_validator.validate, request_to_validate) class ProjectValidationTestCase(unit.BaseTestCase): """Test for V3 Project API validation.""" def setUp(self): super(ProjectValidationTestCase, self).setUp() 
self.project_name = 'My Project' create = resource_schema.project_create update = resource_schema.project_update self.create_project_validator = validators.SchemaValidator(create) self.update_project_validator = validators.SchemaValidator(update) def test_validate_project_request(self): """Test that we validate a project with `name` in request.""" request_to_validate = {'name': self.project_name} self.create_project_validator.validate(request_to_validate) def test_validate_project_request_without_name_fails(self): """Validate project request fails without name.""" request_to_validate = {'enabled': True} self.assertRaises(exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate) def test_validate_project_request_with_enabled(self): """Validate `enabled` as boolean-like values for projects.""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = {'name': self.project_name, 'enabled': valid_enabled} self.create_project_validator.validate(request_to_validate) def test_validate_project_request_with_invalid_enabled_fails(self): """Exception is raised when `enabled` isn't a boolean-like value.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = {'name': self.project_name, 'enabled': invalid_enabled} self.assertRaises(exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate) def test_validate_project_request_with_valid_description(self): """Test that we validate `description` in create project requests.""" request_to_validate = {'name': self.project_name, 'description': 'My Project'} self.create_project_validator.validate(request_to_validate) def test_validate_project_request_with_invalid_description_fails(self): """Exception is raised when `description` as a non-string value.""" request_to_validate = {'name': self.project_name, 'description': False} self.assertRaises(exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate) def 
test_validate_project_request_with_name_too_long(self): """Exception is raised when `name` is too long.""" long_project_name = 'a' * 65 request_to_validate = {'name': long_project_name} self.assertRaises(exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate) def test_validate_project_request_with_name_too_short(self): """Exception raised when `name` is too short.""" request_to_validate = {'name': ''} self.assertRaises(exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate) def test_validate_project_request_with_valid_parent_id(self): """Test that we validate `parent_id` in create project requests.""" # parent_id is nullable request_to_validate = {'name': self.project_name, 'parent_id': None} self.create_project_validator.validate(request_to_validate) request_to_validate = {'name': self.project_name, 'parent_id': uuid.uuid4().hex} self.create_project_validator.validate(request_to_validate) def test_validate_project_request_with_invalid_parent_id_fails(self): """Exception is raised when `parent_id` as a non-id value.""" request_to_validate = {'name': self.project_name, 'parent_id': False} self.assertRaises(exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate) request_to_validate = {'name': self.project_name, 'parent_id': 'fake project'} self.assertRaises(exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate) def test_validate_project_update_request(self): """Test that we validate a project update request.""" request_to_validate = {'domain_id': uuid.uuid4().hex} self.update_project_validator.validate(request_to_validate) def test_validate_project_update_request_with_no_parameters_fails(self): """Exception is raised when updating project without parameters.""" request_to_validate = {} self.assertRaises(exception.SchemaValidationError, self.update_project_validator.validate, request_to_validate) def 
test_validate_project_update_request_with_name_too_long_fails(self): """Exception raised when updating a project with `name` too long.""" long_project_name = 'a' * 65 request_to_validate = {'name': long_project_name} self.assertRaises(exception.SchemaValidationError, self.update_project_validator.validate, request_to_validate) def test_validate_project_update_request_with_name_too_short_fails(self): """Exception raised when updating a project with `name` too short.""" request_to_validate = {'name': ''} self.assertRaises(exception.SchemaValidationError, self.update_project_validator.validate, request_to_validate) def test_validate_project_create_request_with_valid_domain_id(self): """Test that we validate `domain_id` in create project requests.""" # domain_id is nullable for domain_id in [None, uuid.uuid4().hex]: request_to_validate = {'name': self.project_name, 'domain_id': domain_id} self.create_project_validator.validate(request_to_validate) def test_validate_project_request_with_invalid_domain_id_fails(self): """Exception is raised when `domain_id` is a non-id value.""" for domain_id in [False, 'fake_project']: request_to_validate = {'name': self.project_name, 'domain_id': domain_id} self.assertRaises(exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate) class DomainValidationTestCase(unit.BaseTestCase): """Test for V3 Domain API validation.""" def setUp(self): super(DomainValidationTestCase, self).setUp() self.domain_name = 'My Domain' create = resource_schema.domain_create update = resource_schema.domain_update self.create_domain_validator = validators.SchemaValidator(create) self.update_domain_validator = validators.SchemaValidator(update) def test_validate_domain_request(self): """Make sure we successfully validate a create domain request.""" request_to_validate = {'name': self.domain_name} self.create_domain_validator.validate(request_to_validate) def test_validate_domain_request_without_name_fails(self): """Make sure 
we raise an exception when `name` isn't included.""" request_to_validate = {'enabled': True} self.assertRaises(exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate) def test_validate_domain_request_with_enabled(self): """Validate `enabled` as boolean-like values for domains.""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = {'name': self.domain_name, 'enabled': valid_enabled} self.create_domain_validator.validate(request_to_validate) def test_validate_domain_request_with_invalid_enabled_fails(self): """Exception is raised when `enabled` isn't a boolean-like value.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = {'name': self.domain_name, 'enabled': invalid_enabled} self.assertRaises(exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate) def test_validate_domain_request_with_valid_description(self): """Test that we validate `description` in create domain requests.""" request_to_validate = {'name': self.domain_name, 'description': 'My Domain'} self.create_domain_validator.validate(request_to_validate) def test_validate_domain_request_with_invalid_description_fails(self): """Exception is raised when `description` is a non-string value.""" request_to_validate = {'name': self.domain_name, 'description': False} self.assertRaises(exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate) def test_validate_domain_request_with_name_too_long(self): """Exception is raised when `name` is too long.""" long_domain_name = 'a' * 65 request_to_validate = {'name': long_domain_name} self.assertRaises(exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate) def test_validate_domain_request_with_name_too_short(self): """Exception raised when `name` is too short.""" request_to_validate = {'name': ''} self.assertRaises(exception.SchemaValidationError, self.create_domain_validator.validate, 
request_to_validate) def test_validate_domain_update_request(self): """Test that we validate a domain update request.""" request_to_validate = {'domain_id': uuid.uuid4().hex} self.update_domain_validator.validate(request_to_validate) def test_validate_domain_update_request_with_no_parameters_fails(self): """Exception is raised when updating a domain without parameters.""" request_to_validate = {} self.assertRaises(exception.SchemaValidationError, self.update_domain_validator.validate, request_to_validate) def test_validate_domain_update_request_with_name_too_long_fails(self): """Exception raised when updating a domain with `name` too long.""" long_domain_name = 'a' * 65 request_to_validate = {'name': long_domain_name} self.assertRaises(exception.SchemaValidationError, self.update_domain_validator.validate, request_to_validate) def test_validate_domain_update_request_with_name_too_short_fails(self): """Exception raised when updating a domain with `name` too short.""" request_to_validate = {'name': ''} self.assertRaises(exception.SchemaValidationError, self.update_domain_validator.validate, request_to_validate) class RoleValidationTestCase(unit.BaseTestCase): """Test for V3 Role API validation.""" def setUp(self): super(RoleValidationTestCase, self).setUp() self.role_name = 'My Role' create = assignment_schema.role_create update = assignment_schema.role_update self.create_role_validator = validators.SchemaValidator(create) self.update_role_validator = validators.SchemaValidator(update) def test_validate_role_request(self): """Test we can successfully validate a create role request.""" request_to_validate = {'name': self.role_name} self.create_role_validator.validate(request_to_validate) def test_validate_role_create_without_name_raises_exception(self): """Test that we raise an exception when `name` isn't included.""" request_to_validate = {'enabled': True} self.assertRaises(exception.SchemaValidationError, self.create_role_validator.validate, request_to_validate) def 
test_validate_role_create_when_name_is_not_string_fails(self): """Exception is raised on role create with a non-string `name`.""" request_to_validate = {'name': True} self.assertRaises(exception.SchemaValidationError, self.create_role_validator.validate, request_to_validate) request_to_validate = {'name': 24} self.assertRaises(exception.SchemaValidationError, self.create_role_validator.validate, request_to_validate) def test_validate_role_update_request(self): """Test that we validate a role update request.""" request_to_validate = {'name': 'My New Role'} self.update_role_validator.validate(request_to_validate) def test_validate_role_update_fails_with_invalid_name_fails(self): """Exception when validating an update request with invalid `name`.""" request_to_validate = {'name': True} self.assertRaises(exception.SchemaValidationError, self.update_role_validator.validate, request_to_validate) request_to_validate = {'name': 24} self.assertRaises(exception.SchemaValidationError, self.update_role_validator.validate, request_to_validate) class PolicyValidationTestCase(unit.BaseTestCase): """Test for V3 Policy API validation.""" def setUp(self): super(PolicyValidationTestCase, self).setUp() create = policy_schema.policy_create update = policy_schema.policy_update self.create_policy_validator = validators.SchemaValidator(create) self.update_policy_validator = validators.SchemaValidator(update) def test_validate_policy_succeeds(self): """Test that we validate a create policy request.""" request_to_validate = {'blob': 'some blob information', 'type': 'application/json'} self.create_policy_validator.validate(request_to_validate) def test_validate_policy_without_blob_fails(self): """Exception raised without `blob` in request.""" request_to_validate = {'type': 'application/json'} self.assertRaises(exception.SchemaValidationError, self.create_policy_validator.validate, request_to_validate) def test_validate_policy_without_type_fails(self): """Exception raised without `type` in 
request.""" request_to_validate = {'blob': 'some blob information'} self.assertRaises(exception.SchemaValidationError, self.create_policy_validator.validate, request_to_validate) def test_validate_policy_create_with_extra_parameters_succeeds(self): """Validate policy create with extra parameters.""" request_to_validate = {'blob': 'some blob information', 'type': 'application/json', 'extra': 'some extra stuff'} self.create_policy_validator.validate(request_to_validate) def test_validate_policy_create_with_invalid_type_fails(self): """Exception raised when `blob` and `type` are boolean.""" for prop in ['blob', 'type']: request_to_validate = {prop: False} self.assertRaises(exception.SchemaValidationError, self.create_policy_validator.validate, request_to_validate) def test_validate_policy_update_without_parameters_fails(self): """Exception raised when updating policy without parameters.""" request_to_validate = {} self.assertRaises(exception.SchemaValidationError, self.update_policy_validator.validate, request_to_validate) def test_validate_policy_update_with_extra_parameters_succeeds(self): """Validate policy update request with extra parameters.""" request_to_validate = {'blob': 'some blob information', 'type': 'application/json', 'extra': 'some extra stuff'} self.update_policy_validator.validate(request_to_validate) def test_validate_policy_update_succeeds(self): """Test that we validate a policy update request.""" request_to_validate = {'blob': 'some blob information', 'type': 'application/json'} self.update_policy_validator.validate(request_to_validate) def test_validate_policy_update_with_invalid_type_fails(self): """Exception raised when invalid `type` on policy update.""" for prop in ['blob', 'type']: request_to_validate = {prop: False} self.assertRaises(exception.SchemaValidationError, self.update_policy_validator.validate, request_to_validate) class CredentialValidationTestCase(unit.BaseTestCase): """Test for V3 Credential API validation.""" def setUp(self): 
super(CredentialValidationTestCase, self).setUp() create = credential_schema.credential_create update = credential_schema.credential_update self.create_credential_validator = validators.SchemaValidator(create) self.update_credential_validator = validators.SchemaValidator(update) def test_validate_credential_succeeds(self): """Test that we validate a credential request.""" request_to_validate = {'blob': 'some string', 'project_id': uuid.uuid4().hex, 'type': 'ec2', 'user_id': uuid.uuid4().hex} self.create_credential_validator.validate(request_to_validate) def test_validate_credential_without_blob_fails(self): """Exception raised without `blob` in create request.""" request_to_validate = {'type': 'ec2', 'user_id': uuid.uuid4().hex} self.assertRaises(exception.SchemaValidationError, self.create_credential_validator.validate, request_to_validate) def test_validate_credential_without_user_id_fails(self): """Exception raised without `user_id` in create request.""" request_to_validate = {'blob': 'some credential blob', 'type': 'ec2'} self.assertRaises(exception.SchemaValidationError, self.create_credential_validator.validate, request_to_validate) def test_validate_credential_without_type_fails(self): """Exception raised without `type` in create request.""" request_to_validate = {'blob': 'some credential blob', 'user_id': uuid.uuid4().hex} self.assertRaises(exception.SchemaValidationError, self.create_credential_validator.validate, request_to_validate) def test_validate_credential_ec2_without_project_id_fails(self): """Validate `project_id` is required for ec2. Test that a SchemaValidationError is raised when type is ec2 and no `project_id` is provided in create request. 
""" request_to_validate = {'blob': 'some credential blob', 'type': 'ec2', 'user_id': uuid.uuid4().hex} self.assertRaises(exception.SchemaValidationError, self.create_credential_validator.validate, request_to_validate) def test_validate_credential_with_project_id_succeeds(self): """Test that credential request works for all types.""" cred_types = ['ec2', 'cert', uuid.uuid4().hex] for c_type in cred_types: request_to_validate = {'blob': 'some blob', 'project_id': uuid.uuid4().hex, 'type': c_type, 'user_id': uuid.uuid4().hex} # Make sure an exception isn't raised self.create_credential_validator.validate(request_to_validate) def test_validate_credential_non_ec2_without_project_id_succeeds(self): """Validate `project_id` is not required for non-ec2. Test that create request without `project_id` succeeds for any non-ec2 credential. """ cred_types = ['cert', uuid.uuid4().hex] for c_type in cred_types: request_to_validate = {'blob': 'some blob', 'type': c_type, 'user_id': uuid.uuid4().hex} # Make sure an exception isn't raised self.create_credential_validator.validate(request_to_validate) def test_validate_credential_with_extra_parameters_succeeds(self): """Validate create request with extra parameters.""" request_to_validate = {'blob': 'some string', 'extra': False, 'project_id': uuid.uuid4().hex, 'type': 'ec2', 'user_id': uuid.uuid4().hex} self.create_credential_validator.validate(request_to_validate) def test_validate_credential_update_succeeds(self): """Test that a credential request is properly validated.""" request_to_validate = {'blob': 'some string', 'project_id': uuid.uuid4().hex, 'type': 'ec2', 'user_id': uuid.uuid4().hex} self.update_credential_validator.validate(request_to_validate) def test_validate_credential_update_without_parameters_fails(self): """Exception is raised on update without parameters.""" request_to_validate = {} self.assertRaises(exception.SchemaValidationError, self.update_credential_validator.validate, request_to_validate) def 
test_validate_credential_update_with_extra_parameters_succeeds(self): """Validate credential update with extra parameters.""" request_to_validate = {'blob': 'some string', 'extra': False, 'project_id': uuid.uuid4().hex, 'type': 'ec2', 'user_id': uuid.uuid4().hex} self.update_credential_validator.validate(request_to_validate) class RegionValidationTestCase(unit.BaseTestCase): """Test for V3 Region API validation.""" def setUp(self): super(RegionValidationTestCase, self).setUp() self.region_name = 'My Region' create = catalog_schema.region_create update = catalog_schema.region_update self.create_region_validator = validators.SchemaValidator(create) self.update_region_validator = validators.SchemaValidator(update) def test_validate_region_request(self): """Test that we validate a basic region request.""" # Create_region doesn't take any parameters in the request so let's # make sure we cover that case. request_to_validate = {} self.create_region_validator.validate(request_to_validate) def test_validate_region_create_request_with_parameters(self): """Test that we validate a region request with parameters.""" request_to_validate = {'id': 'us-east', 'description': 'US East Region', 'parent_region_id': 'US Region'} self.create_region_validator.validate(request_to_validate) def test_validate_region_create_with_uuid(self): """Test that we validate a region request with a UUID as the id.""" request_to_validate = {'id': uuid.uuid4().hex, 'description': 'US East Region', 'parent_region_id': uuid.uuid4().hex} self.create_region_validator.validate(request_to_validate) def test_validate_region_create_fails_with_invalid_region_id(self): """Exception raised when passing invalid `id` in request.""" request_to_validate = {'id': 1234, 'description': 'US East Region'} self.assertRaises(exception.SchemaValidationError, self.create_region_validator.validate, request_to_validate) def test_validate_region_create_succeeds_with_extra_parameters(self): """Validate create region request with 
extra values.""" request_to_validate = {'other_attr': uuid.uuid4().hex} self.create_region_validator.validate(request_to_validate) def test_validate_region_create_succeeds_with_no_parameters(self): """Validate create region request with no parameters.""" request_to_validate = {} self.create_region_validator.validate(request_to_validate) def test_validate_region_update_succeeds(self): """Test that we validate a region update request.""" request_to_validate = {'id': 'us-west', 'description': 'US West Region', 'parent_region_id': 'us-region'} self.update_region_validator.validate(request_to_validate) def test_validate_region_update_succeeds_with_extra_parameters(self): """Validate extra attributes in the region update request.""" request_to_validate = {'other_attr': uuid.uuid4().hex} self.update_region_validator.validate(request_to_validate) def test_validate_region_update_fails_with_no_parameters(self): """Exception raised when passing no parameters in a region update.""" # An update request should consist of at least one value to update request_to_validate = {} self.assertRaises(exception.SchemaValidationError, self.update_region_validator.validate, request_to_validate) class ServiceValidationTestCase(unit.BaseTestCase): """Test for V3 Service API validation.""" def setUp(self): super(ServiceValidationTestCase, self).setUp() create = catalog_schema.service_create update = catalog_schema.service_update self.create_service_validator = validators.SchemaValidator(create) self.update_service_validator = validators.SchemaValidator(update) def test_validate_service_create_succeeds(self): """Test that we validate a service create request.""" request_to_validate = {'name': 'Nova', 'description': 'OpenStack Compute Service', 'enabled': True, 'type': 'compute'} self.create_service_validator.validate(request_to_validate) def test_validate_service_create_succeeds_with_required_parameters(self): """Validate a service create request with the required parameters.""" # The only 
class ServiceValidationTestCase(unit.BaseTestCase):
    """Test for V3 Service API validation."""

    def setUp(self):
        super(ServiceValidationTestCase, self).setUp()
        create = catalog_schema.service_create
        update = catalog_schema.service_update
        self.create_service_validator = validators.SchemaValidator(create)
        self.update_service_validator = validators.SchemaValidator(update)

    def test_validate_service_create_succeeds(self):
        """Test that we validate a service create request."""
        request_to_validate = {'name': 'Nova',
                               'description': 'OpenStack Compute Service',
                               'enabled': True,
                               'type': 'compute'}
        self.create_service_validator.validate(request_to_validate)

    def test_validate_service_create_succeeds_with_required_parameters(self):
        """Validate a service create request with the required parameters."""
        # The only parameter type required for service creation is 'type'
        request_to_validate = {'type': 'compute'}
        self.create_service_validator.validate(request_to_validate)

    def test_validate_service_create_fails_without_type(self):
        """Exception raised when trying to create a service without `type`."""
        request_to_validate = {'name': 'Nova'}
        self.assertRaises(exception.SchemaValidationError,
                          self.create_service_validator.validate,
                          request_to_validate)

    def test_validate_service_create_succeeds_with_extra_parameters(self):
        """Test that extra parameters pass validation on create service."""
        request_to_validate = {'other_attr': uuid.uuid4().hex,
                               'type': uuid.uuid4().hex}
        self.create_service_validator.validate(request_to_validate)

    def test_validate_service_create_succeeds_with_valid_enabled(self):
        """Validate boolean values as enabled values on service create."""
        for valid_enabled in _VALID_ENABLED_FORMATS:
            request_to_validate = {'enabled': valid_enabled,
                                   'type': uuid.uuid4().hex}
            self.create_service_validator.validate(request_to_validate)

    def test_validate_service_create_fails_with_invalid_enabled(self):
        """Exception raised when boolean-like parameters as `enabled`

        On service create, make sure an exception is raised if `enabled` is
        not a boolean value.
        """
        for invalid_enabled in _INVALID_ENABLED_FORMATS:
            request_to_validate = {'enabled': invalid_enabled,
                                   'type': uuid.uuid4().hex}
            self.assertRaises(exception.SchemaValidationError,
                              self.create_service_validator.validate,
                              request_to_validate)

    def test_validate_service_create_fails_when_name_too_long(self):
        """Exception raised when `name` is greater than 255 characters."""
        long_name = 'a' * 256
        request_to_validate = {'type': 'compute',
                               'name': long_name}
        self.assertRaises(exception.SchemaValidationError,
                          self.create_service_validator.validate,
                          request_to_validate)

    def test_validate_service_create_fails_when_name_too_short(self):
        """Exception is raised when `name` is too short."""
        request_to_validate = {'type': 'compute',
                               'name': ''}
        self.assertRaises(exception.SchemaValidationError,
                          self.create_service_validator.validate,
                          request_to_validate)

    def test_validate_service_create_fails_when_type_too_long(self):
        """Exception is raised when `type` is too long."""
        long_type_name = 'a' * 256
        request_to_validate = {'type': long_type_name}
        self.assertRaises(exception.SchemaValidationError,
                          self.create_service_validator.validate,
                          request_to_validate)

    def test_validate_service_create_fails_when_type_too_short(self):
        """Exception is raised when `type` is too short."""
        request_to_validate = {'type': ''}
        self.assertRaises(exception.SchemaValidationError,
                          self.create_service_validator.validate,
                          request_to_validate)

    def test_validate_service_update_request_succeeds(self):
        """Test that we validate a service update request."""
        request_to_validate = {'name': 'Cinder',
                               'type': 'volume',
                               'description': 'OpenStack Block Storage',
                               'enabled': False}
        self.update_service_validator.validate(request_to_validate)

    def test_validate_service_update_fails_with_no_parameters(self):
        """Exception raised when updating a service without values."""
        request_to_validate = {}
        self.assertRaises(exception.SchemaValidationError,
                          self.update_service_validator.validate,
                          request_to_validate)

    def test_validate_service_update_succeeds_with_extra_parameters(self):
        """Validate updating a service with extra parameters."""
        request_to_validate = {'other_attr': uuid.uuid4().hex}
        self.update_service_validator.validate(request_to_validate)

    def test_validate_service_update_succeeds_with_valid_enabled(self):
        """Validate boolean formats as `enabled` on service update."""
        for valid_enabled in _VALID_ENABLED_FORMATS:
            request_to_validate = {'enabled': valid_enabled}
            self.update_service_validator.validate(request_to_validate)

    def test_validate_service_update_fails_with_invalid_enabled(self):
        """Exception raised when boolean-like values as `enabled`."""
        for invalid_enabled in _INVALID_ENABLED_FORMATS:
            request_to_validate = {'enabled': invalid_enabled}
            self.assertRaises(exception.SchemaValidationError,
                              self.update_service_validator.validate,
                              request_to_validate)

    def test_validate_service_update_fails_with_name_too_long(self):
        """Exception is raised when `name` is too long on update."""
        long_name = 'a' * 256
        request_to_validate = {'name': long_name}
        self.assertRaises(exception.SchemaValidationError,
                          self.update_service_validator.validate,
                          request_to_validate)

    def test_validate_service_update_fails_with_name_too_short(self):
        """Exception is raised when `name` is too short on update."""
        request_to_validate = {'name': ''}
        self.assertRaises(exception.SchemaValidationError,
                          self.update_service_validator.validate,
                          request_to_validate)

    def test_validate_service_update_fails_with_type_too_long(self):
        """Exception is raised when `type` is too long on update."""
        long_type_name = 'a' * 256
        request_to_validate = {'type': long_type_name}
        self.assertRaises(exception.SchemaValidationError,
                          self.update_service_validator.validate,
                          request_to_validate)

    def test_validate_service_update_fails_with_type_too_short(self):
        """Exception is raised when `type` is too short on update."""
        request_to_validate = {'type': ''}
        self.assertRaises(exception.SchemaValidationError,
                          self.update_service_validator.validate,
                          request_to_validate)
""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = {'enabled': valid_enabled, 'service_id': uuid.uuid4().hex, 'interface': 'public', 'url': 'https://service.example.com:5000/'} self.create_endpoint_validator.validate(request_to_validate) def test_validate_create_endpoint_fails_with_invalid_enabled(self): """Exception raised when boolean-like values as `enabled`.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = {'enabled': invalid_enabled, 'service_id': uuid.uuid4().hex, 'interface': 'public', 'url': 'https://service.example.com:5000/'} self.assertRaises(exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate) def test_validate_endpoint_create_succeeds_with_extra_parameters(self): """Test that extra parameters pass validation on create endpoint.""" request_to_validate = {'other_attr': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'interface': 'public', 'url': 'https://service.example.com:5000/'} self.create_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_create_fails_without_service_id(self): """Exception raised when `service_id` isn't in endpoint request.""" request_to_validate = {'interface': 'public', 'url': 'https://service.example.com:5000/'} self.assertRaises(exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate) def test_validate_endpoint_create_fails_without_interface(self): """Exception raised when `interface` isn't in endpoint request.""" request_to_validate = {'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/'} self.assertRaises(exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate) def test_validate_endpoint_create_fails_without_url(self): """Exception raised when `url` isn't in endpoint request.""" request_to_validate = {'service_id': uuid.uuid4().hex, 'interface': 'public'} self.assertRaises(exception.SchemaValidationError, 
self.create_endpoint_validator.validate, request_to_validate) def test_validate_endpoint_create_succeeds_with_url(self): """Validate `url` attribute in endpoint create request.""" request_to_validate = {'service_id': uuid.uuid4().hex, 'interface': 'public'} for url in _VALID_URLS: request_to_validate['url'] = url self.create_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_create_fails_with_invalid_url(self): """Exception raised when passing invalid `url` in request.""" request_to_validate = {'service_id': uuid.uuid4().hex, 'interface': 'public'} for url in _INVALID_URLS: request_to_validate['url'] = url self.assertRaises(exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate) def test_validate_endpoint_create_fails_with_invalid_interface(self): """Exception raised with invalid `interface`.""" request_to_validate = {'interface': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/'} self.assertRaises(exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate) def test_validate_endpoint_create_fails_with_invalid_region_id(self): """Exception raised when passing invalid `region(_id)` in request.""" request_to_validate = {'interface': 'admin', 'region_id': 1234, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/'} self.assertRaises(exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate) request_to_validate = {'interface': 'admin', 'region': 1234, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/'} self.assertRaises(exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate) def test_validate_endpoint_update_fails_with_invalid_enabled(self): """Exception raised when `enabled` is boolean-like value.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = {'enabled': invalid_enabled} 
self.assertRaises(exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate) def test_validate_endpoint_update_succeeds_with_valid_enabled(self): """Validate `enabled` as boolean values.""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = {'enabled': valid_enabled} self.update_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_update_fails_with_invalid_interface(self): """Exception raised when invalid `interface` on endpoint update.""" request_to_validate = {'interface': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/'} self.assertRaises(exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate) def test_validate_endpoint_update_request_succeeds(self): """Test that we validate an endpoint update request.""" request_to_validate = {'enabled': True, 'interface': 'admin', 'region_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/'} self.update_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_update_fails_with_no_parameters(self): """Exception raised when no parameters on endpoint update.""" request_to_validate = {} self.assertRaises(exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate) def test_validate_endpoint_update_succeeds_with_extra_parameters(self): """Test that extra parameters pass validation on update endpoint.""" request_to_validate = {'enabled': True, 'interface': 'admin', 'region_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/', 'other_attr': uuid.uuid4().hex} self.update_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_update_succeeds_with_url(self): """Validate `url` attribute in endpoint update request.""" request_to_validate = {'service_id': uuid.uuid4().hex, 'interface': 'public'} for url in _VALID_URLS: 
request_to_validate['url'] = url self.update_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_update_fails_with_invalid_url(self): """Exception raised when passing invalid `url` in request.""" request_to_validate = {'service_id': uuid.uuid4().hex, 'interface': 'public'} for url in _INVALID_URLS: request_to_validate['url'] = url self.assertRaises(exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate) def test_validate_endpoint_update_fails_with_invalid_region_id(self): """Exception raised when passing invalid `region(_id)` in request.""" request_to_validate = {'interface': 'admin', 'region_id': 1234, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/'} self.assertRaises(exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate) request_to_validate = {'interface': 'admin', 'region': 1234, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/'} self.assertRaises(exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate) class EndpointGroupValidationTestCase(unit.BaseTestCase): """Test for V3 Endpoint Group API validation.""" def setUp(self): super(EndpointGroupValidationTestCase, self).setUp() create = catalog_schema.endpoint_group_create update = catalog_schema.endpoint_group_update self.create_endpoint_grp_validator = validators.SchemaValidator(create) self.update_endpoint_grp_validator = validators.SchemaValidator(update) def test_validate_endpoint_group_request_succeeds(self): """Test that we validate an endpoint group request.""" request_to_validate = {'description': 'endpoint group description', 'filters': {'interface': 'admin'}, 'name': 'endpoint_group_name'} self.create_endpoint_grp_validator.validate(request_to_validate) def test_validate_endpoint_group_create_succeeds_with_req_parameters(self): """Validate required endpoint group parameters. 
class EndpointGroupValidationTestCase(unit.BaseTestCase):
    """Test for V3 Endpoint Group API validation."""

    def setUp(self):
        super(EndpointGroupValidationTestCase, self).setUp()
        create = catalog_schema.endpoint_group_create
        update = catalog_schema.endpoint_group_update
        self.create_endpoint_grp_validator = validators.SchemaValidator(create)
        self.update_endpoint_grp_validator = validators.SchemaValidator(update)

    def test_validate_endpoint_group_request_succeeds(self):
        """Test that we validate an endpoint group request."""
        request_to_validate = {'description': 'endpoint group description',
                               'filters': {'interface': 'admin'},
                               'name': 'endpoint_group_name'}
        self.create_endpoint_grp_validator.validate(request_to_validate)

    def test_validate_endpoint_group_create_succeeds_with_req_parameters(self):
        """Validate required endpoint group parameters.

        This test ensure that validation succeeds with only the required
        parameters passed for creating an endpoint group.
        """
        request_to_validate = {'filters': {'interface': 'admin'},
                               'name': 'endpoint_group_name'}
        self.create_endpoint_grp_validator.validate(request_to_validate)

    def test_validate_endpoint_group_create_succeeds_with_valid_filters(self):
        """Validate `filters` in endpoint group create requests."""
        request_to_validate = {'description': 'endpoint group description',
                               'name': 'endpoint_group_name'}
        for valid_filters in _VALID_FILTERS:
            request_to_validate['filters'] = valid_filters
            self.create_endpoint_grp_validator.validate(request_to_validate)

    def test_validate_create_endpoint_group_fails_with_invalid_filters(self):
        """Validate invalid `filters` value in endpoint group parameters.

        This test ensures that exception is raised when non-dict values
        is used as `filters` in endpoint group create request.
        """
        request_to_validate = {'description': 'endpoint group description',
                               'name': 'endpoint_group_name'}
        for invalid_filters in _INVALID_FILTERS:
            request_to_validate['filters'] = invalid_filters
            self.assertRaises(exception.SchemaValidationError,
                              self.create_endpoint_grp_validator.validate,
                              request_to_validate)

    def test_validate_endpoint_group_create_fails_without_name(self):
        """Exception raised when `name` isn't in endpoint group request."""
        request_to_validate = {'description': 'endpoint group description',
                               'filters': {'interface': 'admin'}}
        self.assertRaises(exception.SchemaValidationError,
                          self.create_endpoint_grp_validator.validate,
                          request_to_validate)

    def test_validate_endpoint_group_create_fails_without_filters(self):
        """Exception raised when `filters` isn't in endpoint group request."""
        request_to_validate = {'description': 'endpoint group description',
                               'name': 'endpoint_group_name'}
        self.assertRaises(exception.SchemaValidationError,
                          self.create_endpoint_grp_validator.validate,
                          request_to_validate)

    def test_validate_endpoint_group_update_request_succeeds(self):
        """Test that we validate an endpoint group update request."""
        request_to_validate = {'description': 'endpoint group description',
                               'filters': {'interface': 'admin'},
                               'name': 'endpoint_group_name'}
        self.update_endpoint_grp_validator.validate(request_to_validate)

    def test_validate_endpoint_group_update_fails_with_no_parameters(self):
        """Exception raised when no parameters on endpoint group update."""
        request_to_validate = {}
        self.assertRaises(exception.SchemaValidationError,
                          self.update_endpoint_grp_validator.validate,
                          request_to_validate)

    def test_validate_endpoint_group_update_succeeds_with_name(self):
        """Validate request with only `name` in endpoint group update.

        This test ensures that passing only a `name` passes validation
        on update endpoint group request.
        """
        request_to_validate = {'name': 'endpoint_group_name'}
        self.update_endpoint_grp_validator.validate(request_to_validate)

    def test_validate_endpoint_group_update_succeeds_with_valid_filters(self):
        """Validate `filters` as dict values."""
        for valid_filters in _VALID_FILTERS:
            request_to_validate = {'filters': valid_filters}
            self.update_endpoint_grp_validator.validate(request_to_validate)

    def test_validate_endpoint_group_update_fails_with_invalid_filters(self):
        """Exception raised when passing invalid `filters` in request."""
        for invalid_filters in _INVALID_FILTERS:
            request_to_validate = {'filters': invalid_filters}
            self.assertRaises(exception.SchemaValidationError,
                              self.update_endpoint_grp_validator.validate,
                              request_to_validate)
"""Test that we validate an endpoint group update request.""" request_to_validate = {'description': 'endpoint group description', 'filters': {'interface': 'admin'}, 'name': 'endpoint_group_name'} self.update_endpoint_grp_validator.validate(request_to_validate) def test_validate_endpoint_group_update_fails_with_no_parameters(self): """Exception raised when no parameters on endpoint group update.""" request_to_validate = {} self.assertRaises(exception.SchemaValidationError, self.update_endpoint_grp_validator.validate, request_to_validate) def test_validate_endpoint_group_update_succeeds_with_name(self): """Validate request with only `name` in endpoint group update. This test ensures that passing only a `name` passes validation on update endpoint group request. """ request_to_validate = {'name': 'endpoint_group_name'} self.update_endpoint_grp_validator.validate(request_to_validate) def test_validate_endpoint_group_update_succeeds_with_valid_filters(self): """Validate `filters` as dict values.""" for valid_filters in _VALID_FILTERS: request_to_validate = {'filters': valid_filters} self.update_endpoint_grp_validator.validate(request_to_validate) def test_validate_endpoint_group_update_fails_with_invalid_filters(self): """Exception raised when passing invalid `filters` in request.""" for invalid_filters in _INVALID_FILTERS: request_to_validate = {'filters': invalid_filters} self.assertRaises(exception.SchemaValidationError, self.update_endpoint_grp_validator.validate, request_to_validate) class TrustValidationTestCase(unit.BaseTestCase): """Test for V3 Trust API validation.""" _valid_roles = ['member', uuid.uuid4().hex, str(uuid.uuid4())] _invalid_roles = [False, True, 123, None] def setUp(self): super(TrustValidationTestCase, self).setUp() create = trust_schema.trust_create self.create_trust_validator = validators.SchemaValidator(create) def test_validate_trust_succeeds(self): """Test that we can validate a trust request.""" request_to_validate = {'trustor_user_id': 
uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False} self.create_trust_validator.validate(request_to_validate) def test_validate_trust_with_all_parameters_succeeds(self): """Test that we can validate a trust request with all parameters.""" request_to_validate = {'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'project_id': uuid.uuid4().hex, 'roles': [uuid.uuid4().hex, uuid.uuid4().hex], 'expires_at': 'some timestamp', 'remaining_uses': 2} self.create_trust_validator.validate(request_to_validate) def test_validate_trust_without_trustor_id_fails(self): """Validate trust request fails without `trustor_id`.""" request_to_validate = {'trustee_user_id': uuid.uuid4().hex, 'impersonation': False} self.assertRaises(exception.SchemaValidationError, self.create_trust_validator.validate, request_to_validate) def test_validate_trust_without_trustee_id_fails(self): """Validate trust request fails without `trustee_id`.""" request_to_validate = {'trusor_user_id': uuid.uuid4().hex, 'impersonation': False} self.assertRaises(exception.SchemaValidationError, self.create_trust_validator.validate, request_to_validate) def test_validate_trust_without_impersonation_fails(self): """Validate trust request fails without `impersonation`.""" request_to_validate = {'trustee_user_id': uuid.uuid4().hex, 'trustor_user_id': uuid.uuid4().hex} self.assertRaises(exception.SchemaValidationError, self.create_trust_validator.validate, request_to_validate) def test_validate_trust_with_extra_parameters_succeeds(self): """Test that we can validate a trust request with extra parameters.""" request_to_validate = {'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'project_id': uuid.uuid4().hex, 'roles': [uuid.uuid4().hex, uuid.uuid4().hex], 'expires_at': 'some timestamp', 'remaining_uses': 2, 'extra': 'something extra!'} self.create_trust_validator.validate(request_to_validate) def 
test_validate_trust_with_invalid_impersonation_fails(self): """Validate trust request with invalid `impersonation` fails.""" request_to_validate = {'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': 2} self.assertRaises(exception.SchemaValidationError, self.create_trust_validator.validate, request_to_validate) def test_validate_trust_with_null_remaining_uses_succeeds(self): """Validate trust request with null `remaining_uses`.""" request_to_validate = {'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'remaining_uses': None} self.create_trust_validator.validate(request_to_validate) def test_validate_trust_with_remaining_uses_succeeds(self): """Validate trust request with `remaining_uses` succeeds.""" request_to_validate = {'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'remaining_uses': 2} self.create_trust_validator.validate(request_to_validate) def test_validate_trust_with_period_in_user_id_string(self): """Validate trust request with a period in the user id string.""" request_to_validate = {'trustor_user_id': 'john.smith', 'trustee_user_id': 'joe.developer', 'impersonation': False} self.create_trust_validator.validate(request_to_validate) def test_validate_trust_with_invalid_expires_at_fails(self): """Validate trust request with invalid `expires_at` fails.""" request_to_validate = {'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'expires_at': 3} self.assertRaises(exception.SchemaValidationError, self.create_trust_validator.validate, request_to_validate) def test_validate_trust_with_role_types_succeeds(self): """Validate trust request with `roles` succeeds.""" for role in self._valid_roles: request_to_validate = {'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'roles': [role]} 
class ServiceProviderValidationTestCase(unit.BaseTestCase):
    """Test for V3 Service Provider API validation."""

    def setUp(self):
        super(ServiceProviderValidationTestCase, self).setUp()
        self.valid_auth_url = 'https://' + uuid.uuid4().hex + '.com'
        self.valid_sp_url = 'https://' + uuid.uuid4().hex + '.com'
        create = federation_schema.service_provider_create
        update = federation_schema.service_provider_update
        self.create_sp_validator = validators.SchemaValidator(create)
        self.update_sp_validator = validators.SchemaValidator(update)

    def test_validate_sp_request(self):
        """Test that we validate `auth_url` and `sp_url` in request."""
        request_to_validate = {
            'auth_url': self.valid_auth_url,
            'sp_url': self.valid_sp_url
        }
        self.create_sp_validator.validate(request_to_validate)

    def test_validate_sp_request_with_invalid_auth_url_fails(self):
        """Validate request fails with invalid `auth_url`."""
        request_to_validate = {
            'auth_url': uuid.uuid4().hex,
            'sp_url': self.valid_sp_url
        }
        self.assertRaises(exception.SchemaValidationError,
                          self.create_sp_validator.validate,
                          request_to_validate)

    def test_validate_sp_request_with_invalid_sp_url_fails(self):
        """Validate request fails with invalid `sp_url`."""
        request_to_validate = {
            'auth_url': self.valid_auth_url,
            'sp_url': uuid.uuid4().hex,
        }
        self.assertRaises(exception.SchemaValidationError,
                          self.create_sp_validator.validate,
                          request_to_validate)

    def test_validate_sp_request_without_auth_url_fails(self):
        """Validate request fails without `auth_url`."""
        request_to_validate = {
            'sp_url': self.valid_sp_url
        }
        self.assertRaises(exception.SchemaValidationError,
                          self.create_sp_validator.validate,
                          request_to_validate)

        request_to_validate = {
            'auth_url': None,
            'sp_url': self.valid_sp_url
        }
        self.assertRaises(exception.SchemaValidationError,
                          self.create_sp_validator.validate,
                          request_to_validate)

    def test_validate_sp_request_without_sp_url_fails(self):
        """Validate request fails without `sp_url`."""
        request_to_validate = {
            'auth_url': self.valid_auth_url,
        }
        self.assertRaises(exception.SchemaValidationError,
                          self.create_sp_validator.validate,
                          request_to_validate)

        request_to_validate = {
            'auth_url': self.valid_auth_url,
            'sp_url': None,
        }
        self.assertRaises(exception.SchemaValidationError,
                          self.create_sp_validator.validate,
                          request_to_validate)

    def test_validate_sp_request_with_enabled(self):
        """Validate `enabled` as boolean-like values."""
        for valid_enabled in _VALID_ENABLED_FORMATS:
            request_to_validate = {
                'auth_url': self.valid_auth_url,
                'sp_url': self.valid_sp_url,
                'enabled': valid_enabled
            }
            self.create_sp_validator.validate(request_to_validate)

    def test_validate_sp_request_with_invalid_enabled_fails(self):
        """Exception is raised when `enabled` isn't a boolean-like value."""
        for invalid_enabled in _INVALID_ENABLED_FORMATS:
            request_to_validate = {
                'auth_url': self.valid_auth_url,
                'sp_url': self.valid_sp_url,
                'enabled': invalid_enabled
            }
            self.assertRaises(exception.SchemaValidationError,
                              self.create_sp_validator.validate,
                              request_to_validate)

    def test_validate_sp_request_with_valid_description(self):
        """Test that we validate `description` in create requests."""
        request_to_validate = {
            'auth_url': self.valid_auth_url,
            'sp_url': self.valid_sp_url,
            'description': 'My Service Provider'
        }
        self.create_sp_validator.validate(request_to_validate)

    def test_validate_sp_request_with_invalid_description_fails(self):
        """Exception is raised when `description` as a non-string value."""
        request_to_validate = {
            'auth_url': self.valid_auth_url,
            'sp_url': self.valid_sp_url,
            'description': False
        }
        self.assertRaises(exception.SchemaValidationError,
                          self.create_sp_validator.validate,
                          request_to_validate)

    def test_validate_sp_request_with_extra_field_fails(self):
        """Exception raised when passing extra fields in the body."""
        # 'id' can't be passed in the body since it is passed in the URL
        request_to_validate = {
            'id': 'ACME',
            'auth_url': self.valid_auth_url,
            'sp_url': self.valid_sp_url,
            'description': 'My Service Provider'
        }
        self.assertRaises(exception.SchemaValidationError,
                          self.create_sp_validator.validate,
                          request_to_validate)

    def test_validate_sp_update_request(self):
        """Test that we validate a update request."""
        request_to_validate = {'description': uuid.uuid4().hex}
        self.update_sp_validator.validate(request_to_validate)

    def test_validate_sp_update_request_with_no_parameters_fails(self):
        """Exception is raised when updating without parameters."""
        request_to_validate = {}
        self.assertRaises(exception.SchemaValidationError,
                          self.update_sp_validator.validate,
                          request_to_validate)

    def test_validate_sp_update_request_with_invalid_auth_url_fails(self):
        """Exception raised when updating with invalid `auth_url`."""
        request_to_validate = {'auth_url': uuid.uuid4().hex}
        self.assertRaises(exception.SchemaValidationError,
                          self.update_sp_validator.validate,
                          request_to_validate)

        request_to_validate = {'auth_url': None}
        self.assertRaises(exception.SchemaValidationError,
                          self.update_sp_validator.validate,
                          request_to_validate)

    def test_validate_sp_update_request_with_invalid_sp_url_fails(self):
        """Exception raised when updating with invalid `sp_url`."""
        request_to_validate = {'sp_url': uuid.uuid4().hex}
self.assertRaises(exception.SchemaValidationError, self.update_sp_validator.validate, request_to_validate) request_to_validate = {'sp_url': None} self.assertRaises(exception.SchemaValidationError, self.update_sp_validator.validate, request_to_validate) class UserValidationTestCase(unit.BaseTestCase): """Test for V3 User API validation.""" def setUp(self): super(UserValidationTestCase, self).setUp() self.user_name = uuid.uuid4().hex create = identity_schema.user_create update = identity_schema.user_update self.create_user_validator = validators.SchemaValidator(create) self.update_user_validator = validators.SchemaValidator(update) def test_validate_user_create_request_succeeds(self): """Test that validating a user create request succeeds.""" request_to_validate = {'name': self.user_name} self.create_user_validator.validate(request_to_validate) def test_validate_user_create_with_all_valid_parameters_succeeds(self): """Test that validating a user create request succeeds.""" request_to_validate = unit.new_user_ref(domain_id=uuid.uuid4().hex, name=self.user_name) self.create_user_validator.validate(request_to_validate) def test_validate_user_create_fails_without_name(self): """Exception raised when validating a user without name.""" request_to_validate = {'email': uuid.uuid4().hex} self.assertRaises(exception.SchemaValidationError, self.create_user_validator.validate, request_to_validate) def test_validate_user_create_fails_with_name_of_zero_length(self): """Exception raised when validating a username with length of zero.""" request_to_validate = {'name': ''} self.assertRaises(exception.SchemaValidationError, self.create_user_validator.validate, request_to_validate) def test_validate_user_create_fails_with_name_of_wrong_type(self): """Exception raised when validating a username of wrong type.""" request_to_validate = {'name': True} self.assertRaises(exception.SchemaValidationError, self.create_user_validator.validate, request_to_validate) def 
test_validate_user_create_succeeds_with_valid_enabled_formats(self): """Validate acceptable enabled formats in create user requests.""" for enabled in _VALID_ENABLED_FORMATS: request_to_validate = {'name': self.user_name, 'enabled': enabled} self.create_user_validator.validate(request_to_validate) def test_validate_user_create_fails_with_invalid_enabled_formats(self): """Exception raised when enabled is not an acceptable format.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = {'name': self.user_name, 'enabled': invalid_enabled} self.assertRaises(exception.SchemaValidationError, self.create_user_validator.validate, request_to_validate) def test_validate_user_create_succeeds_with_extra_attributes(self): """Validate extra parameters on user create requests.""" request_to_validate = {'name': self.user_name, 'other_attr': uuid.uuid4().hex} self.create_user_validator.validate(request_to_validate) def test_validate_user_create_succeeds_with_password_of_zero_length(self): """Validate empty password on user create requests.""" request_to_validate = {'name': self.user_name, 'password': ''} self.create_user_validator.validate(request_to_validate) def test_validate_user_create_succeeds_with_null_password(self): """Validate that password is nullable on create user.""" request_to_validate = {'name': self.user_name, 'password': None} self.create_user_validator.validate(request_to_validate) def test_validate_user_create_fails_with_invalid_password_type(self): """Exception raised when user password is of the wrong type.""" request_to_validate = {'name': self.user_name, 'password': True} self.assertRaises(exception.SchemaValidationError, self.create_user_validator.validate, request_to_validate) def test_validate_user_create_succeeds_with_null_description(self): """Validate that description can be nullable on create user.""" request_to_validate = {'name': self.user_name, 'description': None} self.create_user_validator.validate(request_to_validate) def 
test_validate_user_update_succeeds(self): """Validate an update user request.""" request_to_validate = {'email': uuid.uuid4().hex} self.update_user_validator.validate(request_to_validate) def test_validate_user_update_fails_with_no_parameters(self): """Exception raised when updating nothing.""" request_to_validate = {} self.assertRaises(exception.SchemaValidationError, self.update_user_validator.validate, request_to_validate) def test_validate_user_update_succeeds_with_extra_parameters(self): """Validate user update requests with extra parameters.""" request_to_validate = {'other_attr': uuid.uuid4().hex} self.update_user_validator.validate(request_to_validate) class GroupValidationTestCase(unit.BaseTestCase): """Test for V3 Group API validation.""" def setUp(self): super(GroupValidationTestCase, self).setUp() self.group_name = uuid.uuid4().hex create = identity_schema.group_create update = identity_schema.group_update self.create_group_validator = validators.SchemaValidator(create) self.update_group_validator = validators.SchemaValidator(update) def test_validate_group_create_succeeds(self): """Validate create group requests.""" request_to_validate = {'name': self.group_name} self.create_group_validator.validate(request_to_validate) def test_validate_group_create_succeeds_with_all_parameters(self): """Validate create group requests with all parameters.""" request_to_validate = {'name': self.group_name, 'description': uuid.uuid4().hex, 'domain_id': uuid.uuid4().hex} self.create_group_validator.validate(request_to_validate) def test_validate_group_create_fails_without_group_name(self): """Exception raised when group name is not provided in request.""" request_to_validate = {'description': uuid.uuid4().hex} self.assertRaises(exception.SchemaValidationError, self.create_group_validator.validate, request_to_validate) def test_validate_group_create_fails_when_group_name_is_too_short(self): """Exception raised when group name is equal to zero.""" request_to_validate = 
{'name': ''} self.assertRaises(exception.SchemaValidationError, self.create_group_validator.validate, request_to_validate) def test_validate_group_create_succeeds_with_extra_parameters(self): """Validate extra attributes on group create requests.""" request_to_validate = {'name': self.group_name, 'other_attr': uuid.uuid4().hex} self.create_group_validator.validate(request_to_validate) def test_validate_group_update_succeeds(self): """Validate group update requests.""" request_to_validate = {'description': uuid.uuid4().hex} self.update_group_validator.validate(request_to_validate) def test_validate_group_update_fails_with_no_parameters(self): """Exception raised when no parameters passed in on update.""" request_to_validate = {} self.assertRaises(exception.SchemaValidationError, self.update_group_validator.validate, request_to_validate) def test_validate_group_update_succeeds_with_extra_parameters(self): """Validate group update requests with extra parameters.""" request_to_validate = {'other_attr': uuid.uuid4().hex} self.update_group_validator.validate(request_to_validate) class IdentityProviderValidationTestCase(unit.BaseTestCase): """Test for V3 Identity Provider API validation.""" def setUp(self): super(IdentityProviderValidationTestCase, self).setUp() create = federation_schema.identity_provider_create update = federation_schema.identity_provider_update self.create_idp_validator = validators.SchemaValidator(create) self.update_idp_validator = validators.SchemaValidator(update) def test_validate_idp_request_succeeds(self): """Test that we validate an identity provider request.""" request_to_validate = {'description': 'identity provider description', 'enabled': True, 'remote_ids': [uuid.uuid4().hex, uuid.uuid4().hex]} self.create_idp_validator.validate(request_to_validate) self.update_idp_validator.validate(request_to_validate) def test_validate_idp_request_fails_with_invalid_params(self): """Exception raised when unknown parameter is found.""" 
request_to_validate = {'bogus': uuid.uuid4().hex} self.assertRaises(exception.SchemaValidationError, self.create_idp_validator.validate, request_to_validate) self.assertRaises(exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate) def test_validate_idp_request_with_enabled(self): """Validate `enabled` as boolean-like values.""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = {'enabled': valid_enabled} self.create_idp_validator.validate(request_to_validate) self.update_idp_validator.validate(request_to_validate) def test_validate_idp_request_with_invalid_enabled_fails(self): """Exception is raised when `enabled` isn't a boolean-like value.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = {'enabled': invalid_enabled} self.assertRaises(exception.SchemaValidationError, self.create_idp_validator.validate, request_to_validate) self.assertRaises(exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate) def test_validate_idp_request_no_parameters(self): """Test that schema validation with empty request body.""" request_to_validate = {} self.create_idp_validator.validate(request_to_validate) # Exception raised when no property on IdP update. 
self.assertRaises(exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate) def test_validate_idp_request_with_invalid_description_fails(self): """Exception is raised when `description` as a non-string value.""" request_to_validate = {'description': False} self.assertRaises(exception.SchemaValidationError, self.create_idp_validator.validate, request_to_validate) self.assertRaises(exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate) def test_validate_idp_request_with_invalid_remote_id_fails(self): """Exception is raised when `remote_ids` is not a array.""" request_to_validate = {"remote_ids": uuid.uuid4().hex} self.assertRaises(exception.SchemaValidationError, self.create_idp_validator.validate, request_to_validate) self.assertRaises(exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate) def test_validate_idp_request_with_duplicated_remote_id(self): """Exception is raised when the duplicated `remote_ids` is found.""" idp_id = uuid.uuid4().hex request_to_validate = {"remote_ids": [idp_id, idp_id]} self.assertRaises(exception.SchemaValidationError, self.create_idp_validator.validate, request_to_validate) self.assertRaises(exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate) def test_validate_idp_request_remote_id_nullable(self): """Test that `remote_ids` could be explicitly set to None""" request_to_validate = {'remote_ids': None} self.create_idp_validator.validate(request_to_validate) self.update_idp_validator.validate(request_to_validate) class FederationProtocolValidationTestCase(unit.BaseTestCase): """Test for V3 Federation Protocol API validation.""" def setUp(self): super(FederationProtocolValidationTestCase, self).setUp() schema = federation_schema.federation_protocol_schema # create protocol and update protocol have the same shema definition, # combine them together, no need to validate separately. 
self.protocol_validator = validators.SchemaValidator(schema) def test_validate_protocol_request_succeeds(self): """Test that we validate a protocol request successfully.""" request_to_validate = {'mapping_id': uuid.uuid4().hex} self.protocol_validator.validate(request_to_validate) def test_validate_protocol_request_succeeds_with_nonuuid_mapping_id(self): """Test that we allow underscore in mapping_id value.""" request_to_validate = {'mapping_id': 'my_mapping_id'} self.protocol_validator.validate(request_to_validate) def test_validate_protocol_request_fails_with_invalid_params(self): """Exception raised when unknown parameter is found.""" request_to_validate = {'bogus': uuid.uuid4().hex} self.assertRaises(exception.SchemaValidationError, self.protocol_validator.validate, request_to_validate) def test_validate_protocol_request_no_parameters(self): """Test that schema validation with empty request body.""" request_to_validate = {} # 'mapping_id' is required. self.assertRaises(exception.SchemaValidationError, self.protocol_validator.validate, request_to_validate) def test_validate_protocol_request_fails_with_invalid_mapping_id(self): """Exception raised when mapping_id is not string.""" request_to_validate = {'mapping_id': 12334} self.assertRaises(exception.SchemaValidationError, self.protocol_validator.validate, request_to_validate) class OAuth1ValidationTestCase(unit.BaseTestCase): """Test for V3 Identity OAuth1 API validation.""" def setUp(self): super(OAuth1ValidationTestCase, self).setUp() create = oauth1_schema.consumer_create update = oauth1_schema.consumer_update self.create_consumer_validator = validators.SchemaValidator(create) self.update_consumer_validator = validators.SchemaValidator(update) def test_validate_consumer_request_succeeds(self): """Test that we validate a consumer request successfully.""" request_to_validate = {'description': uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.create_consumer_validator.validate(request_to_validate) 
self.update_consumer_validator.validate(request_to_validate) def test_validate_consumer_request_with_no_parameters(self): """Test that schema validation with empty request body.""" request_to_validate = {} self.create_consumer_validator.validate(request_to_validate) # At least one property should be given. self.assertRaises(exception.SchemaValidationError, self.update_consumer_validator.validate, request_to_validate) def test_validate_consumer_request_with_invalid_description_fails(self): """Exception is raised when `description` as a non-string value.""" for invalid_desc in _INVALID_DESC_FORMATS: request_to_validate = {'description': invalid_desc} self.assertRaises(exception.SchemaValidationError, self.create_consumer_validator.validate, request_to_validate) self.assertRaises(exception.SchemaValidationError, self.update_consumer_validator.validate, request_to_validate) def test_validate_update_consumer_request_fails_with_secret(self): """Exception raised when secret is given.""" request_to_validate = {'secret': uuid.uuid4().hex} self.assertRaises(exception.SchemaValidationError, self.update_consumer_validator.validate, request_to_validate) def test_validate_consumer_request_with_none_desc(self): """Test that schema validation with None desc.""" request_to_validate = {'description': None} self.create_consumer_validator.validate(request_to_validate) self.update_consumer_validator.validate(request_to_validate) keystone-9.0.0/keystone/tests/unit/test_sql_upgrade.py0000664000567000056710000014230412701407105024424 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ To run these tests against a live database: 1. Modify the file ``keystone/tests/unit/config_files/backend_sql.conf`` to use the connection for your live database. 2. Set up a blank, live database 3. Run the tests using:: tox -e py27 -- keystone.tests.unit.test_sql_upgrade WARNING:: Your database will be wiped. Do not do this against a database with valuable data as all data will be lost. """ import json import uuid import migrate from migrate.versioning import api as versioning_api from migrate.versioning import repository import mock from oslo_config import cfg from oslo_db import exception as db_exception from oslo_db.sqlalchemy import migration from oslo_db.sqlalchemy import session as db_session from sqlalchemy.engine import reflection import sqlalchemy.exc from sqlalchemy import schema from testtools import matchers from keystone.common import sql from keystone.common.sql import migration_helpers from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database CONF = cfg.CONF # NOTE(morganfainberg): This should be updated when each DB migration collapse # is done to mirror the expected structure of the DB in the format of # { : [, , ...], ... 
} INITIAL_TABLE_STRUCTURE = { 'credential': [ 'id', 'user_id', 'project_id', 'blob', 'type', 'extra', ], 'domain': [ 'id', 'name', 'enabled', 'extra', ], 'endpoint': [ 'id', 'legacy_endpoint_id', 'interface', 'region_id', 'service_id', 'url', 'enabled', 'extra', ], 'group': [ 'id', 'domain_id', 'name', 'description', 'extra', ], 'policy': [ 'id', 'type', 'blob', 'extra', ], 'project': [ 'id', 'name', 'extra', 'description', 'enabled', 'domain_id', 'parent_id', ], 'role': [ 'id', 'name', 'extra', ], 'service': [ 'id', 'type', 'extra', 'enabled', ], 'token': [ 'id', 'expires', 'extra', 'valid', 'trust_id', 'user_id', ], 'trust': [ 'id', 'trustor_user_id', 'trustee_user_id', 'project_id', 'impersonation', 'deleted_at', 'expires_at', 'remaining_uses', 'extra', ], 'trust_role': [ 'trust_id', 'role_id', ], 'user': [ 'id', 'name', 'extra', 'password', 'enabled', 'domain_id', 'default_project_id', ], 'user_group_membership': [ 'user_id', 'group_id', ], 'region': [ 'id', 'description', 'parent_region_id', 'extra', ], 'assignment': [ 'type', 'actor_id', 'target_id', 'role_id', 'inherited', ], 'id_mapping': [ 'public_id', 'domain_id', 'local_id', 'entity_type', ], 'whitelisted_config': [ 'domain_id', 'group', 'option', 'value', ], 'sensitive_config': [ 'domain_id', 'group', 'option', 'value', ], } # Test migration_helpers.get_init_version separately to ensure it works before # using in the SqlUpgrade tests. class MigrationHelpersGetInitVersionTests(unit.TestCase): @mock.patch.object(repository, 'Repository') def test_get_init_version_no_path(self, repo): migrate_versions = mock.MagicMock() # make a version list starting with zero. `get_init_version` will # return None for this value. migrate_versions.versions.versions = list(range(0, 5)) repo.return_value = migrate_versions # os.path.isdir() is called by `find_migrate_repo()`. Mock it to avoid # an exception. 
with mock.patch('os.path.isdir', return_value=True): # since 0 is the smallest version expect None version = migration_helpers.get_init_version() self.assertIsNone(version) # check that the default path was used as the first argument to the # first invocation of repo. Cannot match the full path because it is # based on where the test is run. param = repo.call_args_list[0][0][0] self.assertTrue(param.endswith('/sql/migrate_repo')) @mock.patch.object(repository, 'Repository') def test_get_init_version_with_path_initial_version_0(self, repo): migrate_versions = mock.MagicMock() # make a version list starting with zero. `get_init_version` will # return None for this value. migrate_versions.versions.versions = list(range(0, 5)) repo.return_value = migrate_versions # os.path.isdir() is called by `find_migrate_repo()`. Mock it to avoid # an exception. with mock.patch('os.path.isdir', return_value=True): path = '/keystone/migrate_repo/' # since 0 is the smallest version expect None version = migration_helpers.get_init_version(abs_path=path) self.assertIsNone(version) @mock.patch.object(repository, 'Repository') def test_get_init_version_with_path(self, repo): initial_version = 10 migrate_versions = mock.MagicMock() migrate_versions.versions.versions = list(range(initial_version + 1, initial_version + 5)) repo.return_value = migrate_versions # os.path.isdir() is called by `find_migrate_repo()`. Mock it to avoid # an exception. with mock.patch('os.path.isdir', return_value=True): path = '/keystone/migrate_repo/' version = migration_helpers.get_init_version(abs_path=path) self.assertEqual(initial_version, version) class SqlMigrateBase(unit.SQLDriverOverrides, unit.TestCase): # override this in subclasses. The default of zero covers tests such # as extensions upgrades. 
_initial_db_version = 0 def initialize_sql(self): self.metadata = sqlalchemy.MetaData() self.metadata.bind = self.engine def config_files(self): config_files = super(SqlMigrateBase, self).config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def repo_package(self): return sql def setUp(self): super(SqlMigrateBase, self).setUp() self.load_backends() database.initialize_sql_session() conn_str = CONF.database.connection if (conn_str != unit.IN_MEM_DB_CONN_STRING and conn_str.startswith('sqlite') and conn_str[10:] == unit.DEFAULT_TEST_DB_FILE): # Override the default with a DB that is specific to the migration # tests only if the DB Connection string is the same as the global # default. This is required so that no conflicts occur due to the # global default DB already being under migrate control. This is # only needed if the DB is not-in-memory db_file = unit.dirs.tmp('keystone_migrate_test.db') self.config_fixture.config( group='database', connection='sqlite:///%s' % db_file) # create and share a single sqlalchemy engine for testing with sql.session_for_write() as session: self.engine = session.get_bind() self.addCleanup(self.cleanup_instance('engine')) self.Session = db_session.get_maker(self.engine, autocommit=False) self.addCleanup(sqlalchemy.orm.session.Session.close_all) self.initialize_sql() self.repo_path = migration_helpers.find_migrate_repo( self.repo_package()) self.schema = versioning_api.ControlledSchema.create( self.engine, self.repo_path, self._initial_db_version) # auto-detect the highest available schema version in the migrate_repo self.max_version = self.schema.repository.version().version self.addCleanup(sql.cleanup) # drop tables and FKs. 
self.addCleanup(self._cleanupDB) def _cleanupDB(self): meta = sqlalchemy.MetaData() meta.bind = self.engine meta.reflect(self.engine) with self.engine.begin() as conn: inspector = reflection.Inspector.from_engine(self.engine) metadata = schema.MetaData() tbs = [] all_fks = [] for table_name in inspector.get_table_names(): fks = [] for fk in inspector.get_foreign_keys(table_name): if not fk['name']: continue fks.append( schema.ForeignKeyConstraint((), (), name=fk['name'])) table = schema.Table(table_name, metadata, *fks) tbs.append(table) all_fks.extend(fks) for fkc in all_fks: if self.engine.name != 'sqlite': conn.execute(schema.DropConstraint(fkc)) for table in tbs: conn.execute(schema.DropTable(table)) def select_table(self, name): table = sqlalchemy.Table(name, self.metadata, autoload=True) s = sqlalchemy.select([table]) return s def assertTableExists(self, table_name): try: self.select_table(table_name) except sqlalchemy.exc.NoSuchTableError: raise AssertionError('Table "%s" does not exist' % table_name) def assertTableDoesNotExist(self, table_name): """Asserts that a given table exists cannot be selected by name.""" # Switch to a different metadata otherwise you might still # detect renamed or dropped tables try: temp_metadata = sqlalchemy.MetaData() temp_metadata.bind = self.engine sqlalchemy.Table(table_name, temp_metadata, autoload=True) except sqlalchemy.exc.NoSuchTableError: pass else: raise AssertionError('Table "%s" already exists' % table_name) def assertTableCountsMatch(self, table1_name, table2_name): try: table1 = self.select_table(table1_name) except sqlalchemy.exc.NoSuchTableError: raise AssertionError('Table "%s" does not exist' % table1_name) try: table2 = self.select_table(table2_name) except sqlalchemy.exc.NoSuchTableError: raise AssertionError('Table "%s" does not exist' % table2_name) session = self.Session() table1_count = session.execute(table1.count()).scalar() table2_count = session.execute(table2.count()).scalar() if table1_count != 
table2_count: raise AssertionError('Table counts do not match: {0} ({1}), {2} ' '({3})'.format(table1_name, table1_count, table2_name, table2_count)) def upgrade(self, *args, **kwargs): self._migrate(*args, **kwargs) def _migrate(self, version, repository=None, downgrade=False, current_schema=None): repository = repository or self.repo_path err = '' version = versioning_api._migrate_version(self.schema, version, not downgrade, err) if not current_schema: current_schema = self.schema changeset = current_schema.changeset(version) for ver, change in changeset: self.schema.runchange(ver, change, changeset.step) self.assertEqual(self.schema.version, version) def assertTableColumns(self, table_name, expected_cols): """Asserts that the table contains the expected set of columns.""" self.initialize_sql() table = self.select_table(table_name) actual_cols = [col.name for col in table.columns] # Check if the columns are equal, but allow for a different order, # which might occur after an upgrade followed by a downgrade self.assertItemsEqual(expected_cols, actual_cols, '%s table' % table_name) class SqlUpgradeTests(SqlMigrateBase): _initial_db_version = migration_helpers.get_init_version() def test_blank_db_to_start(self): self.assertTableDoesNotExist('user') def test_start_version_db_init_version(self): with sql.session_for_write() as session: version = migration.db_version(session.get_bind(), self.repo_path, self._initial_db_version) self.assertEqual( self._initial_db_version, version, 'DB is not at version %s' % self._initial_db_version) def test_upgrade_add_initial_tables(self): self.upgrade(self._initial_db_version + 1) self.check_initial_table_structure() def check_initial_table_structure(self): for table in INITIAL_TABLE_STRUCTURE: self.assertTableColumns(table, INITIAL_TABLE_STRUCTURE[table]) def insert_dict(self, session, table_name, d, table=None): """Naively inserts key-value pairs into a table, given a dictionary.""" if table is None: this_table = 
sqlalchemy.Table(table_name, self.metadata, autoload=True) else: this_table = table insert = this_table.insert().values(**d) session.execute(insert) session.commit() def test_kilo_squash(self): self.upgrade(67) # In 053 the size of ID and parent region ID columns were changed table = sqlalchemy.Table('region', self.metadata, autoload=True) self.assertEqual(255, table.c.id.type.length) self.assertEqual(255, table.c.parent_region_id.type.length) table = sqlalchemy.Table('endpoint', self.metadata, autoload=True) self.assertEqual(255, table.c.region_id.type.length) # In 054 an index was created for the actor_id of the assignment table table = sqlalchemy.Table('assignment', self.metadata, autoload=True) index_data = [(idx.name, list(idx.columns.keys())) for idx in table.indexes] self.assertIn(('ix_actor_id', ['actor_id']), index_data) # In 055 indexes were created for user and trust IDs in the token table table = sqlalchemy.Table('token', self.metadata, autoload=True) index_data = [(idx.name, list(idx.columns.keys())) for idx in table.indexes] self.assertIn(('ix_token_user_id', ['user_id']), index_data) self.assertIn(('ix_token_trust_id', ['trust_id']), index_data) # In 062 the role ID foreign key was removed from the assignment table if self.engine.name == "mysql": self.assertFalse(self.does_fk_exist('assignment', 'role_id')) # In 064 the domain ID FK was removed from the group and user tables if self.engine.name != 'sqlite': # sqlite does not support FK deletions (or enforcement) self.assertFalse(self.does_fk_exist('group', 'domain_id')) self.assertFalse(self.does_fk_exist('user', 'domain_id')) # In 067 the role ID index was removed from the assignment table if self.engine.name == "mysql": self.assertFalse(self._does_index_exist('assignment', 'assignment_role_id_fkey')) def test_insert_assignment_inherited_pk(self): ASSIGNMENT_TABLE_NAME = 'assignment' INHERITED_COLUMN_NAME = 'inherited' ROLE_TABLE_NAME = 'role' self.upgrade(72) # Check that the 'inherited' column is 
not part of the PK self.assertFalse(self.does_pk_exist(ASSIGNMENT_TABLE_NAME, INHERITED_COLUMN_NAME)) session = self.Session() role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.insert_dict(session, ROLE_TABLE_NAME, role) # Create both inherited and noninherited role assignments inherited = {'type': 'UserProject', 'actor_id': uuid.uuid4().hex, 'target_id': uuid.uuid4().hex, 'role_id': role['id'], 'inherited': True} noninherited = inherited.copy() noninherited['inherited'] = False # Create another inherited role assignment as a spoiler spoiler = inherited.copy() spoiler['actor_id'] = uuid.uuid4().hex self.insert_dict(session, ASSIGNMENT_TABLE_NAME, inherited) self.insert_dict(session, ASSIGNMENT_TABLE_NAME, spoiler) # Since 'inherited' is not part of the PK, we can't insert noninherited self.assertRaises(db_exception.DBDuplicateEntry, self.insert_dict, session, ASSIGNMENT_TABLE_NAME, noninherited) session.close() self.upgrade(73) session = self.Session() self.metadata.clear() # Check that the 'inherited' column is now part of the PK self.assertTrue(self.does_pk_exist(ASSIGNMENT_TABLE_NAME, INHERITED_COLUMN_NAME)) # The noninherited role assignment can now be inserted self.insert_dict(session, ASSIGNMENT_TABLE_NAME, noninherited) assignment_table = sqlalchemy.Table(ASSIGNMENT_TABLE_NAME, self.metadata, autoload=True) assignments = session.query(assignment_table).all() for assignment in (inherited, spoiler, noninherited): self.assertIn((assignment['type'], assignment['actor_id'], assignment['target_id'], assignment['role_id'], assignment['inherited']), assignments) def does_pk_exist(self, table, pk_column): """Checks whether a column is primary key on a table.""" inspector = reflection.Inspector.from_engine(self.engine) pk_columns = inspector.get_pk_constraint(table)['constrained_columns'] return pk_column in pk_columns def does_fk_exist(self, table, fk_column): inspector = reflection.Inspector.from_engine(self.engine) for fk in 
inspector.get_foreign_keys(table): if fk_column in fk['constrained_columns']: return True return False def does_index_exist(self, table_name, index_name): meta = sqlalchemy.MetaData(bind=self.engine) table = sqlalchemy.Table(table_name, meta, autoload=True) return index_name in [idx.name for idx in table.indexes] def does_constraint_exist(self, table_name, constraint_name): meta = sqlalchemy.MetaData(bind=self.engine) table = sqlalchemy.Table(table_name, meta, autoload=True) return constraint_name in [con.name for con in table.constraints] def test_endpoint_policy_upgrade(self): self.assertTableDoesNotExist('policy_association') self.upgrade(81) self.assertTableColumns('policy_association', ['id', 'policy_id', 'endpoint_id', 'service_id', 'region_id']) @mock.patch.object(migration_helpers, 'get_db_version', return_value=1) def test_endpoint_policy_already_migrated(self, mock_ep): # By setting the return value to 1, the migration has already been # run, and there's no need to create the table again self.upgrade(81) mock_ep.assert_called_once_with(extension='endpoint_policy', engine=mock.ANY) # It won't exist because we are mocking it, but we can verify # that 081 did not create the table self.assertTableDoesNotExist('policy_association') def test_create_federation_tables(self): self.identity_provider = 'identity_provider' self.federation_protocol = 'federation_protocol' self.service_provider = 'service_provider' self.mapping = 'mapping' self.remote_ids = 'idp_remote_ids' self.assertTableDoesNotExist(self.identity_provider) self.assertTableDoesNotExist(self.federation_protocol) self.assertTableDoesNotExist(self.service_provider) self.assertTableDoesNotExist(self.mapping) self.assertTableDoesNotExist(self.remote_ids) self.upgrade(82) self.assertTableColumns(self.identity_provider, ['id', 'description', 'enabled']) self.assertTableColumns(self.federation_protocol, ['id', 'idp_id', 'mapping_id']) self.assertTableColumns(self.mapping, ['id', 'rules']) 
self.assertTableColumns(self.service_provider, ['id', 'description', 'enabled', 'auth_url', 'relay_state_prefix', 'sp_url']) self.assertTableColumns(self.remote_ids, ['idp_id', 'remote_id']) federation_protocol = sqlalchemy.Table(self.federation_protocol, self.metadata, autoload=True) self.assertFalse(federation_protocol.c.mapping_id.nullable) sp_table = sqlalchemy.Table(self.service_provider, self.metadata, autoload=True) self.assertFalse(sp_table.c.auth_url.nullable) self.assertFalse(sp_table.c.sp_url.nullable) @mock.patch.object(migration_helpers, 'get_db_version', return_value=8) def test_federation_already_migrated(self, mock_federation): # By setting the return value to 8, the migration has already been # run, and there's no need to create the table again. self.upgrade(82) mock_federation.assert_any_call(extension='federation', engine=mock.ANY) # It won't exist because we are mocking it, but we can verify # that 082 did not create the table. self.assertTableDoesNotExist('identity_provider') self.assertTableDoesNotExist('federation_protocol') self.assertTableDoesNotExist('mapping') self.assertTableDoesNotExist('service_provider') self.assertTableDoesNotExist('idp_remote_ids') def test_create_oauth_tables(self): consumer = 'consumer' request_token = 'request_token' access_token = 'access_token' self.assertTableDoesNotExist(consumer) self.assertTableDoesNotExist(request_token) self.assertTableDoesNotExist(access_token) self.upgrade(83) self.assertTableColumns(consumer, ['id', 'description', 'secret', 'extra']) self.assertTableColumns(request_token, ['id', 'request_secret', 'verifier', 'authorizing_user_id', 'requested_project_id', 'role_ids', 'consumer_id', 'expires_at']) self.assertTableColumns(access_token, ['id', 'access_secret', 'authorizing_user_id', 'project_id', 'role_ids', 'consumer_id', 'expires_at']) @mock.patch.object(migration_helpers, 'get_db_version', return_value=5) def test_oauth1_already_migrated(self, mock_oauth1): # By setting the return value 
to 5, the migration has already been # run, and there's no need to create the table again. self.upgrade(83) mock_oauth1.assert_any_call(extension='oauth1', engine=mock.ANY) # It won't exist because we are mocking it, but we can verify # that 083 did not create the table. self.assertTableDoesNotExist('consumer') self.assertTableDoesNotExist('request_token') self.assertTableDoesNotExist('access_token') def test_create_revoke_table(self): self.assertTableDoesNotExist('revocation_event') self.upgrade(84) self.assertTableColumns('revocation_event', ['id', 'domain_id', 'project_id', 'user_id', 'role_id', 'trust_id', 'consumer_id', 'access_token_id', 'issued_before', 'expires_at', 'revoked_at', 'audit_chain_id', 'audit_id']) @mock.patch.object(migration_helpers, 'get_db_version', return_value=2) def test_revoke_already_migrated(self, mock_revoke): # By setting the return value to 2, the migration has already been # run, and there's no need to create the table again. self.upgrade(84) mock_revoke.assert_any_call(extension='revoke', engine=mock.ANY) # It won't exist because we are mocking it, but we can verify # that 084 did not create the table. 
self.assertTableDoesNotExist('revocation_event') def test_project_is_domain_upgrade(self): self.upgrade(74) self.assertTableColumns('project', ['id', 'name', 'extra', 'description', 'enabled', 'domain_id', 'parent_id', 'is_domain']) def test_implied_roles_upgrade(self): self.upgrade(87) self.assertTableColumns('implied_role', ['prior_role_id', 'implied_role_id']) self.assertTrue(self.does_fk_exist('implied_role', 'prior_role_id')) self.assertTrue(self.does_fk_exist('implied_role', 'implied_role_id')) def test_add_config_registration(self): config_registration = 'config_register' self.upgrade(74) self.assertTableDoesNotExist(config_registration) self.upgrade(75) self.assertTableColumns(config_registration, ['type', 'domain_id']) def test_endpoint_filter_upgrade(self): def assert_tables_columns_exist(): self.assertTableColumns('project_endpoint', ['endpoint_id', 'project_id']) self.assertTableColumns('endpoint_group', ['id', 'name', 'description', 'filters']) self.assertTableColumns('project_endpoint_group', ['endpoint_group_id', 'project_id']) self.assertTableDoesNotExist('project_endpoint') self.upgrade(85) assert_tables_columns_exist() @mock.patch.object(migration_helpers, 'get_db_version', return_value=2) def test_endpoint_filter_already_migrated(self, mock_endpoint_filter): # By setting the return value to 2, the migration has already been # run, and there's no need to create the table again. self.upgrade(85) mock_endpoint_filter.assert_any_call(extension='endpoint_filter', engine=mock.ANY) # It won't exist because we are mocking it, but we can verify # that 085 did not create the table. 
self.assertTableDoesNotExist('project_endpoint') self.assertTableDoesNotExist('endpoint_group') self.assertTableDoesNotExist('project_endpoint_group') def test_add_trust_unique_constraint_upgrade(self): self.upgrade(86) inspector = reflection.Inspector.from_engine(self.engine) constraints = inspector.get_unique_constraints('trust') constraint_names = [constraint['name'] for constraint in constraints] self.assertIn('duplicate_trust_constraint', constraint_names) def test_add_domain_specific_roles(self): """Check database upgraded successfully for domain specific roles. The following items need to be checked: - The domain_id column has been added - That it has been added to the uniqueness constraints - Existing roles have their domain_id columns set to the specific string of '<>' """ NULL_DOMAIN_ID = '<>' self.upgrade(87) session = self.Session() role_table = sqlalchemy.Table('role', self.metadata, autoload=True) # Add a role before we upgrade, so we can check that its new domain_id # attribute is handled correctly role_id = uuid.uuid4().hex self.insert_dict(session, 'role', {'id': role_id, 'name': uuid.uuid4().hex}) session.close() self.upgrade(88) session = self.Session() self.metadata.clear() self.assertTableColumns('role', ['id', 'name', 'domain_id', 'extra']) # Check the domain_id has been added to the uniqueness constraint inspector = reflection.Inspector.from_engine(self.engine) constraints = inspector.get_unique_constraints('role') constraint_columns = [ constraint['column_names'] for constraint in constraints if constraint['name'] == 'ixu_role_name_domain_id'] self.assertIn('domain_id', constraint_columns[0]) # Now check our role has its domain_id attribute set correctly role_table = sqlalchemy.Table('role', self.metadata, autoload=True) cols = [role_table.c.domain_id] filter = role_table.c.id == role_id statement = sqlalchemy.select(cols).where(filter) role_entry = session.execute(statement).fetchone() self.assertEqual(NULL_DOMAIN_ID, role_entry[0]) def 
test_add_root_of_all_domains(self): NULL_DOMAIN_ID = '<>' self.upgrade(89) session = self.Session() domain_table = sqlalchemy.Table( 'domain', self.metadata, autoload=True) query = session.query(domain_table).filter_by(id=NULL_DOMAIN_ID) domain_from_db = query.one() self.assertIn(NULL_DOMAIN_ID, domain_from_db) project_table = sqlalchemy.Table( 'project', self.metadata, autoload=True) query = session.query(project_table).filter_by(id=NULL_DOMAIN_ID) project_from_db = query.one() self.assertIn(NULL_DOMAIN_ID, project_from_db) session.close() def test_add_local_user_and_password_tables(self): local_user_table = 'local_user' password_table = 'password' self.upgrade(89) self.assertTableDoesNotExist(local_user_table) self.assertTableDoesNotExist(password_table) self.upgrade(90) self.assertTableColumns(local_user_table, ['id', 'user_id', 'domain_id', 'name']) self.assertTableColumns(password_table, ['id', 'local_user_id', 'password']) def test_migrate_data_to_local_user_and_password_tables(self): def get_expected_users(): expected_users = [] for test_user in default_fixtures.USERS: user = {} user['id'] = uuid.uuid4().hex user['name'] = test_user['name'] user['domain_id'] = test_user['domain_id'] user['password'] = test_user['password'] user['enabled'] = True user['extra'] = json.dumps(uuid.uuid4().hex) user['default_project_id'] = uuid.uuid4().hex expected_users.append(user) return expected_users def add_users_to_db(expected_users, user_table): for user in expected_users: ins = user_table.insert().values( {'id': user['id'], 'name': user['name'], 'domain_id': user['domain_id'], 'password': user['password'], 'enabled': user['enabled'], 'extra': user['extra'], 'default_project_id': user['default_project_id']}) ins.execute() def get_users_from_db(user_table, local_user_table, password_table): sel = ( sqlalchemy.select([user_table.c.id, user_table.c.enabled, user_table.c.extra, user_table.c.default_project_id, local_user_table.c.name, local_user_table.c.domain_id, 
password_table.c.password]) .select_from(user_table.join(local_user_table, user_table.c.id == local_user_table.c.user_id) .join(password_table, local_user_table.c.id == password_table.c.local_user_id)) ) user_rows = sel.execute() users = [] for row in user_rows: users.append( {'id': row['id'], 'name': row['name'], 'domain_id': row['domain_id'], 'password': row['password'], 'enabled': row['enabled'], 'extra': row['extra'], 'default_project_id': row['default_project_id']}) return users meta = sqlalchemy.MetaData() meta.bind = self.engine user_table_name = 'user' local_user_table_name = 'local_user' password_table_name = 'password' # populate current user table self.upgrade(90) user_table = sqlalchemy.Table(user_table_name, meta, autoload=True) expected_users = get_expected_users() add_users_to_db(expected_users, user_table) # upgrade to migration and test self.upgrade(91) self.assertTableCountsMatch(user_table_name, local_user_table_name) self.assertTableCountsMatch(local_user_table_name, password_table_name) meta.clear() user_table = sqlalchemy.Table(user_table_name, meta, autoload=True) local_user_table = sqlalchemy.Table(local_user_table_name, meta, autoload=True) password_table = sqlalchemy.Table(password_table_name, meta, autoload=True) actual_users = get_users_from_db(user_table, local_user_table, password_table) self.assertListEqual(expected_users, actual_users) def test_migrate_user_with_null_password_to_password_tables(self): USER_TABLE_NAME = 'user' LOCAL_USER_TABLE_NAME = 'local_user' PASSWORD_TABLE_NAME = 'password' self.upgrade(90) user_ref = unit.new_user_ref(uuid.uuid4().hex) user_ref.pop('password') # pop extra attribute which doesn't recognized by SQL expression # layer. user_ref.pop('email') session = self.Session() self.insert_dict(session, USER_TABLE_NAME, user_ref) self.metadata.clear() self.upgrade(91) # migration should be successful. 
self.assertTableCountsMatch(USER_TABLE_NAME, LOCAL_USER_TABLE_NAME) # no new entry was added to the password table because the # user doesn't have a password. password_table = self.select_table(PASSWORD_TABLE_NAME) rows = session.execute(password_table.count()).scalar() self.assertEqual(0, rows) def test_migrate_user_skip_user_already_exist_in_local_user(self): USER_TABLE_NAME = 'user' LOCAL_USER_TABLE_NAME = 'local_user' self.upgrade(90) user1_ref = unit.new_user_ref(uuid.uuid4().hex) # pop extra attribute which doesn't recognized by SQL expression # layer. user1_ref.pop('email') user2_ref = unit.new_user_ref(uuid.uuid4().hex) user2_ref.pop('email') session = self.Session() self.insert_dict(session, USER_TABLE_NAME, user1_ref) self.insert_dict(session, USER_TABLE_NAME, user2_ref) user_id = user1_ref.pop('id') user_name = user1_ref.pop('name') domain_id = user1_ref.pop('domain_id') local_user_ref = {'user_id': user_id, 'name': user_name, 'domain_id': domain_id} self.insert_dict(session, LOCAL_USER_TABLE_NAME, local_user_ref) self.metadata.clear() self.upgrade(91) # migration should be successful and user2_ref has been migrated to # `local_user` table. self.assertTableCountsMatch(USER_TABLE_NAME, LOCAL_USER_TABLE_NAME) def test_implied_roles_fk_on_delete_cascade(self): if self.engine.name == 'sqlite': self.skipTest('sqlite backend does not support foreign keys') self.upgrade(92) def _create_three_roles(): id_list = [] for _ in range(3): role = unit.new_role_ref() self.role_api.create_role(role['id'], role) id_list.append(role['id']) return id_list role_id_list = _create_three_roles() self.role_api.create_implied_role(role_id_list[0], role_id_list[1]) self.role_api.create_implied_role(role_id_list[0], role_id_list[2]) # assert that there are two roles implied by role 0. 
implied_roles = self.role_api.list_implied_roles(role_id_list[0]) self.assertThat(implied_roles, matchers.HasLength(2)) self.role_api.delete_role(role_id_list[0]) # assert the cascade deletion is effective. implied_roles = self.role_api.list_implied_roles(role_id_list[0]) self.assertThat(implied_roles, matchers.HasLength(0)) def test_domain_as_project_upgrade(self): def _populate_domain_and_project_tables(session): # Three domains, with various different attributes self.domains = [{'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'enabled': True, 'extra': {'description': uuid.uuid4().hex, 'another_attribute': True}}, {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'enabled': True, 'extra': {'description': uuid.uuid4().hex}}, {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'enabled': False}] # Four projects, two top level, two children self.projects = [] self.projects.append(unit.new_project_ref( domain_id=self.domains[0]['id'], parent_id=None)) self.projects.append(unit.new_project_ref( domain_id=self.domains[0]['id'], parent_id=self.projects[0]['id'])) self.projects.append(unit.new_project_ref( domain_id=self.domains[1]['id'], parent_id=None)) self.projects.append(unit.new_project_ref( domain_id=self.domains[1]['id'], parent_id=self.projects[2]['id'])) for domain in self.domains: this_domain = domain.copy() if 'extra' in this_domain: this_domain['extra'] = json.dumps(this_domain['extra']) self.insert_dict(session, 'domain', this_domain) for project in self.projects: self.insert_dict(session, 'project', project) def _check_projects(projects): def _assert_domain_matches_project(project): for domain in self.domains: if project.id == domain['id']: self.assertEqual(domain['name'], project.name) self.assertEqual(domain['enabled'], project.enabled) if domain['id'] == self.domains[0]['id']: self.assertEqual(domain['extra']['description'], project.description) self.assertEqual({'another_attribute': True}, json.loads(project.extra)) elif domain['id'] == 
self.domains[1]['id']: self.assertEqual(domain['extra']['description'], project.description) self.assertEqual({}, json.loads(project.extra)) # We had domains 3 we created, which should now be projects acting # as domains, To this we add the 4 original projects, plus the root # of all domains row. self.assertEqual(8, projects.count()) project_ids = [] for project in projects: if project.is_domain: self.assertEqual(NULL_DOMAIN_ID, project.domain_id) self.assertIsNone(project.parent_id) else: self.assertIsNotNone(project.domain_id) self.assertIsNotNone(project.parent_id) project_ids.append(project.id) for domain in self.domains: self.assertIn(domain['id'], project_ids) for project in self.projects: self.assertIn(project['id'], project_ids) # Now check the attributes of the domains came across OK for project in projects: _assert_domain_matches_project(project) NULL_DOMAIN_ID = '<>' self.upgrade(92) session = self.Session() _populate_domain_and_project_tables(session) self.upgrade(93) proj_table = sqlalchemy.Table('project', self.metadata, autoload=True) projects = session.query(proj_table) _check_projects(projects) def test_add_federated_user_table(self): federated_user_table = 'federated_user' self.upgrade(93) self.assertTableDoesNotExist(federated_user_table) self.upgrade(94) self.assertTableColumns(federated_user_table, ['id', 'user_id', 'idp_id', 'protocol_id', 'unique_id', 'display_name']) def test_add_int_pkey_to_revocation_event_table(self): meta = sqlalchemy.MetaData() meta.bind = self.engine REVOCATION_EVENT_TABLE_NAME = 'revocation_event' self.upgrade(94) revocation_event_table = sqlalchemy.Table(REVOCATION_EVENT_TABLE_NAME, meta, autoload=True) # assert id column is a string (before) self.assertEqual('VARCHAR(64)', str(revocation_event_table.c.id.type)) self.upgrade(95) meta.clear() revocation_event_table = sqlalchemy.Table(REVOCATION_EVENT_TABLE_NAME, meta, autoload=True) # assert id column is an integer (after) self.assertEqual('INTEGER', 
str(revocation_event_table.c.id.type)) def _add_unique_constraint_to_role_name(self, constraint_name='ixu_role_name'): meta = sqlalchemy.MetaData() meta.bind = self.engine role_table = sqlalchemy.Table('role', meta, autoload=True) migrate.UniqueConstraint(role_table.c.name, name=constraint_name).create() def _drop_unique_constraint_to_role_name(self, constraint_name='ixu_role_name'): role_table = sqlalchemy.Table('role', self.metadata, autoload=True) migrate.UniqueConstraint(role_table.c.name, name=constraint_name).drop() def test_migration_88_drops_unique_constraint(self): self.upgrade(87) if self.engine.name == 'mysql': self.assertTrue(self.does_index_exist('role', 'ixu_role_name')) else: self.assertTrue(self.does_constraint_exist('role', 'ixu_role_name')) self.upgrade(88) if self.engine.name == 'mysql': self.assertFalse(self.does_index_exist('role', 'ixu_role_name')) else: self.assertFalse(self.does_constraint_exist('role', 'ixu_role_name')) def test_migration_88_inconsistent_constraint_name(self): self.upgrade(87) self._drop_unique_constraint_to_role_name() constraint_name = uuid.uuid4().hex self._add_unique_constraint_to_role_name( constraint_name=constraint_name) if self.engine.name == 'mysql': self.assertTrue(self.does_index_exist('role', constraint_name)) self.assertFalse(self.does_index_exist('role', 'ixu_role_name')) else: self.assertTrue(self.does_constraint_exist('role', constraint_name)) self.assertFalse(self.does_constraint_exist('role', 'ixu_role_name')) self.upgrade(88) if self.engine.name == 'mysql': self.assertFalse(self.does_index_exist('role', constraint_name)) self.assertFalse(self.does_index_exist('role', 'ixu_role_name')) else: self.assertFalse(self.does_constraint_exist('role', constraint_name)) self.assertFalse(self.does_constraint_exist('role', 'ixu_role_name')) def test_migration_96(self): self.upgrade(95) if self.engine.name == 'mysql': self.assertFalse(self.does_index_exist('role', 'ixu_role_name')) else: 
self.assertFalse(self.does_constraint_exist('role', 'ixu_role_name')) self.upgrade(96) if self.engine.name == 'mysql': self.assertFalse(self.does_index_exist('role', 'ixu_role_name')) else: self.assertFalse(self.does_constraint_exist('role', 'ixu_role_name')) def test_migration_96_constraint_exists(self): self.upgrade(95) self._add_unique_constraint_to_role_name() if self.engine.name == 'mysql': self.assertTrue(self.does_index_exist('role', 'ixu_role_name')) else: self.assertTrue(self.does_constraint_exist('role', 'ixu_role_name')) self.upgrade(96) if self.engine.name == 'mysql': self.assertFalse(self.does_index_exist('role', 'ixu_role_name')) else: self.assertFalse(self.does_constraint_exist('role', 'ixu_role_name')) class VersionTests(SqlMigrateBase): _initial_db_version = migration_helpers.get_init_version() def test_core_initial(self): """Get the version before migrated, it's the initial DB version.""" version = migration_helpers.get_db_version() self.assertEqual(self._initial_db_version, version) def test_core_max(self): """When get the version after upgrading, it's the new version.""" self.upgrade(self.max_version) version = migration_helpers.get_db_version() self.assertEqual(self.max_version, version) def test_assert_not_schema_downgrade(self): self.upgrade(self.max_version) self.assertRaises( db_exception.DbMigrationError, migration_helpers._sync_common_repo, self.max_version - 1) def test_extension_not_controlled(self): """When get the version before controlling, raises DbMigrationError.""" self.assertRaises(db_exception.DbMigrationError, migration_helpers.get_db_version, extension='federation') def test_unexpected_extension(self): """The version for a non-existent extension raises ImportError.""" extension_name = uuid.uuid4().hex self.assertRaises(ImportError, migration_helpers.get_db_version, extension=extension_name) def test_unversioned_extension(self): """The version for extensions without migrations raise an exception.""" 
self.assertRaises(exception.MigrationNotProvided, migration_helpers.get_db_version, extension='admin_crud') keystone-9.0.0/keystone/tests/unit/test_backend_endpoint_policy.py0000664000567000056710000002761212701407102026765 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from six.moves import range from testtools import matchers from keystone import exception from keystone.tests import unit class PolicyAssociationTests(object): def _assert_correct_policy(self, endpoint, policy): ref = ( self.endpoint_policy_api.get_policy_for_endpoint(endpoint['id'])) self.assertEqual(policy['id'], ref['id']) def _assert_correct_endpoints(self, policy, endpoint_list): endpoint_id_list = [ep['id'] for ep in endpoint_list] endpoints = ( self.endpoint_policy_api.list_endpoints_for_policy(policy['id'])) self.assertThat(endpoints, matchers.HasLength(len(endpoint_list))) for endpoint in endpoints: self.assertIn(endpoint['id'], endpoint_id_list) def load_sample_data(self): """Create sample data to test policy associations. 
The following data is created: - 3 regions, in a hierarchy, 0 -> 1 -> 2 (where 0 is top) - 3 services - 6 endpoints, 2 in each region, with a mixture of services: 0 - region 0, Service 0 1 - region 0, Service 1 2 - region 1, Service 1 3 - region 1, Service 2 4 - region 2, Service 2 5 - region 2, Service 0 """ def new_endpoint(region_id, service_id): endpoint = unit.new_endpoint_ref(interface='test', region_id=region_id, service_id=service_id, url='/url') self.endpoint.append(self.catalog_api.create_endpoint( endpoint['id'], endpoint)) self.policy = [] self.endpoint = [] self.service = [] self.region = [] parent_region_id = None for i in range(3): policy = unit.new_policy_ref() self.policy.append(self.policy_api.create_policy(policy['id'], policy)) service = unit.new_service_ref() self.service.append(self.catalog_api.create_service(service['id'], service)) region = unit.new_region_ref(parent_region_id=parent_region_id) # Link the regions together as a hierarchy, [0] at the top parent_region_id = region['id'] self.region.append(self.catalog_api.create_region(region)) new_endpoint(self.region[0]['id'], self.service[0]['id']) new_endpoint(self.region[0]['id'], self.service[1]['id']) new_endpoint(self.region[1]['id'], self.service[1]['id']) new_endpoint(self.region[1]['id'], self.service[2]['id']) new_endpoint(self.region[2]['id'], self.service[2]['id']) new_endpoint(self.region[2]['id'], self.service[0]['id']) def test_policy_to_endpoint_association_crud(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.check_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.delete_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) def 
test_overwriting_policy_to_endpoint_association(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], endpoint_id=self.endpoint[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.check_policy_association( self.policy[1]['id'], endpoint_id=self.endpoint[0]['id']) def test_invalid_policy_to_endpoint_association(self): self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id']) self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'], region_id=self.region[0]['id']) self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'], service_id=self.service[0]['id']) self.assertRaises(exception.InvalidPolicyAssociation, self.endpoint_policy_api.create_policy_association, self.policy[0]['id'], region_id=self.region[0]['id']) def test_policy_to_explicit_endpoint_association(self): # Associate policy 0 with endpoint 0 self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self._assert_correct_policy(self.endpoint[0], self.policy[0]) self._assert_correct_endpoints(self.policy[0], [self.endpoint[0]]) self.assertRaises(exception.NotFound, self.endpoint_policy_api.get_policy_for_endpoint, uuid.uuid4().hex) def test_policy_to_service_association(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[1]['id']) # Endpoints 0 and 5 are part of 
service 0 self._assert_correct_policy(self.endpoint[0], self.policy[0]) self._assert_correct_policy(self.endpoint[5], self.policy[0]) self._assert_correct_endpoints( self.policy[0], [self.endpoint[0], self.endpoint[5]]) # Endpoints 1 and 2 are part of service 1 self._assert_correct_policy(self.endpoint[1], self.policy[1]) self._assert_correct_policy(self.endpoint[2], self.policy[1]) self._assert_correct_endpoints( self.policy[1], [self.endpoint[1], self.endpoint[2]]) def test_policy_to_region_and_service_association(self): self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[1]['id'], region_id=self.region[1]['id']) self.endpoint_policy_api.create_policy_association( self.policy[2]['id'], service_id=self.service[2]['id'], region_id=self.region[2]['id']) # Endpoint 0 is in region 0 with service 0, so should get policy 0 self._assert_correct_policy(self.endpoint[0], self.policy[0]) # Endpoint 5 is in Region 2 with service 0, so should also get # policy 0 by searching up the tree to Region 0 self._assert_correct_policy(self.endpoint[5], self.policy[0]) # Looking the other way round, policy 2 should only be in use by # endpoint 4, since that's the only endpoint in region 2 with the # correct service self._assert_correct_endpoints( self.policy[2], [self.endpoint[4]]) # Policy 1 should only be in use by endpoint 2, since that's the only # endpoint in region 1 (and region 2 below it) with the correct service self._assert_correct_endpoints( self.policy[1], [self.endpoint[2]]) # Policy 0 should be in use by endpoint 0, as well as 5 (since 5 is # of the correct service and in region 2 below it) self._assert_correct_endpoints( self.policy[0], [self.endpoint[0], self.endpoint[5]]) def test_delete_association_by_entity(self): self.endpoint_policy_api.create_policy_association( 
self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) self.endpoint_policy_api.delete_association_by_endpoint( self.endpoint[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id']) # Make sure deleting it again is silent - since this method is used # in response to notifications by the controller. self.endpoint_policy_api.delete_association_by_endpoint( self.endpoint[0]['id']) # Now try with service - ensure both combined region & service # associations and explicit service ones are removed self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[0]['id'], region_id=self.region[1]['id']) self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id']) self.endpoint_policy_api.delete_association_by_service( self.service[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[1]['id'], service_id=self.service[0]['id'], region_id=self.region[1]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id']) # Finally, check delete by region self.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) self.endpoint_policy_api.delete_association_by_region( self.region[0]['id']) self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id']) 
self.assertRaises(exception.NotFound, self.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id']) keystone-9.0.0/keystone/tests/unit/test_config.py0000664000567000056710000000604412701407102023360 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import cfg from keystone.common import config from keystone import exception from keystone.tests import unit CONF = cfg.CONF class ConfigTestCase(unit.TestCase): def config_files(self): config_files = super(ConfigTestCase, self).config_files() # Insert the keystone sample as the first config file to be loaded # since it is used in one of the code paths to determine the paste-ini # location. 
config_files.insert(0, unit.dirs.etc('keystone.conf.sample')) return config_files def test_paste_config(self): self.assertEqual(unit.dirs.etc('keystone-paste.ini'), config.find_paste_config()) self.config_fixture.config(group='paste_deploy', config_file=uuid.uuid4().hex) self.assertRaises(exception.ConfigFileNotFound, config.find_paste_config) self.config_fixture.config(group='paste_deploy', config_file='') self.assertEqual(unit.dirs.etc('keystone.conf.sample'), config.find_paste_config()) def test_config_default(self): self.assertIs(None, CONF.auth.password) self.assertIs(None, CONF.auth.token) class DeprecatedTestCase(unit.TestCase): """Test using the original (deprecated) name for renamed options.""" def config_files(self): config_files = super(DeprecatedTestCase, self).config_files() config_files.append(unit.dirs.tests_conf('deprecated.conf')) return config_files def test_sql(self): # Options in [sql] were moved to [database] in Icehouse for the change # to use oslo-incubator's db.sqlalchemy.sessions. self.assertEqual('sqlite://deprecated', CONF.database.connection) self.assertEqual(54321, CONF.database.idle_timeout) class DeprecatedOverrideTestCase(unit.TestCase): """Test using the deprecated AND new name for renamed options.""" def config_files(self): config_files = super(DeprecatedOverrideTestCase, self).config_files() config_files.append(unit.dirs.tests_conf('deprecated_override.conf')) return config_files def test_sql(self): # Options in [sql] were moved to [database] in Icehouse for the change # to use oslo-incubator's db.sqlalchemy.sessions. 
self.assertEqual('sqlite://new', CONF.database.connection) self.assertEqual(65432, CONF.database.idle_timeout) keystone-9.0.0/keystone/tests/unit/test_cert_setup.py0000664000567000056710000002121712701407102024267 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import mock from six.moves import http_client from testtools import matchers from keystone.common import environment from keystone.common import openssl from keystone import exception from keystone.tests import unit from keystone.tests.unit import rest from keystone import token SSLDIR = unit.dirs.tmp('ssl') CONF = unit.CONF CERTDIR = os.path.join(SSLDIR, 'certs') KEYDIR = os.path.join(SSLDIR, 'private') class CertSetupTestCase(rest.RestfulTestCase): def setUp(self): super(CertSetupTestCase, self).setUp() def cleanup_ssldir(): try: shutil.rmtree(SSLDIR) except OSError: pass self.addCleanup(cleanup_ssldir) def config_overrides(self): super(CertSetupTestCase, self).config_overrides() ca_certs = os.path.join(CERTDIR, 'ca.pem') ca_key = os.path.join(CERTDIR, 'cakey.pem') self.config_fixture.config( group='signing', certfile=os.path.join(CERTDIR, 'signing_cert.pem'), ca_certs=ca_certs, ca_key=ca_key, keyfile=os.path.join(KEYDIR, 'signing_key.pem')) self.config_fixture.config( group='ssl', ca_key=ca_key) self.config_fixture.config( group='eventlet_server_ssl', ca_certs=ca_certs, certfile=os.path.join(CERTDIR, 'keystone.pem'), 
keyfile=os.path.join(KEYDIR, 'keystonekey.pem')) self.config_fixture.config(group='token', provider='pkiz') def test_can_handle_missing_certs(self): controller = token.controllers.Auth() self.config_fixture.config(group='signing', certfile='invalid') user = unit.create_user(self.identity_api, domain_id=CONF.identity.default_domain_id) body_dict = { 'passwordCredentials': { 'userId': user['id'], 'password': user['password'], }, } self.assertRaises(exception.UnexpectedError, controller.authenticate, {}, body_dict) def test_create_pki_certs(self, rebuild=False): pki = openssl.ConfigurePKI(None, None, rebuild=rebuild) pki.run() self.assertTrue(os.path.exists(CONF.signing.certfile)) self.assertTrue(os.path.exists(CONF.signing.ca_certs)) self.assertTrue(os.path.exists(CONF.signing.keyfile)) def test_create_ssl_certs(self, rebuild=False): ssl = openssl.ConfigureSSL(None, None, rebuild=rebuild) ssl.run() self.assertTrue(os.path.exists(CONF.eventlet_server_ssl.ca_certs)) self.assertTrue(os.path.exists(CONF.eventlet_server_ssl.certfile)) self.assertTrue(os.path.exists(CONF.eventlet_server_ssl.keyfile)) def test_fetch_signing_cert(self, rebuild=False): pki = openssl.ConfigurePKI(None, None, rebuild=rebuild) pki.run() # NOTE(jamielennox): Use request directly because certificate # requests don't have some of the normal information signing_resp = self.request(self.public_app, '/v2.0/certificates/signing', method='GET', expected_status=http_client.OK) cacert_resp = self.request(self.public_app, '/v2.0/certificates/ca', method='GET', expected_status=http_client.OK) with open(CONF.signing.certfile) as f: self.assertEqual(f.read(), signing_resp.text) with open(CONF.signing.ca_certs) as f: self.assertEqual(f.read(), cacert_resp.text) # NOTE(jamielennox): This is weird behaviour that we need to enforce. # It doesn't matter what you ask for it's always going to give text # with a text/html content_type. 
for path in ['/v2.0/certificates/signing', '/v2.0/certificates/ca']: for accept in [None, 'text/html', 'application/json', 'text/xml']: headers = {'Accept': accept} if accept else {} resp = self.request(self.public_app, path, method='GET', expected_status=http_client.OK, headers=headers) self.assertEqual('text/html', resp.content_type) def test_fetch_signing_cert_when_rebuild(self): pki = openssl.ConfigurePKI(None, None) pki.run() self.test_fetch_signing_cert(rebuild=True) def test_failure(self): for path in ['/v2.0/certificates/signing', '/v2.0/certificates/ca']: self.request(self.public_app, path, method='GET', expected_status=http_client.INTERNAL_SERVER_ERROR) def test_pki_certs_rebuild(self): self.test_create_pki_certs() with open(CONF.signing.certfile) as f: cert_file1 = f.read() self.test_create_pki_certs(rebuild=True) with open(CONF.signing.certfile) as f: cert_file2 = f.read() self.assertNotEqual(cert_file1, cert_file2) def test_ssl_certs_rebuild(self): self.test_create_ssl_certs() with open(CONF.eventlet_server_ssl.certfile) as f: cert_file1 = f.read() self.test_create_ssl_certs(rebuild=True) with open(CONF.eventlet_server_ssl.certfile) as f: cert_file2 = f.read() self.assertNotEqual(cert_file1, cert_file2) @mock.patch.object(os, 'remove') def test_rebuild_pki_certs_remove_error(self, mock_remove): self.test_create_pki_certs() with open(CONF.signing.certfile) as f: cert_file1 = f.read() mock_remove.side_effect = OSError() self.test_create_pki_certs(rebuild=True) with open(CONF.signing.certfile) as f: cert_file2 = f.read() self.assertEqual(cert_file1, cert_file2) @mock.patch.object(os, 'remove') def test_rebuild_ssl_certs_remove_error(self, mock_remove): self.test_create_ssl_certs() with open(CONF.eventlet_server_ssl.certfile) as f: cert_file1 = f.read() mock_remove.side_effect = OSError() self.test_create_ssl_certs(rebuild=True) with open(CONF.eventlet_server_ssl.certfile) as f: cert_file2 = f.read() self.assertEqual(cert_file1, cert_file2) def 
test_create_pki_certs_twice_without_rebuild(self): self.test_create_pki_certs() with open(CONF.signing.certfile) as f: cert_file1 = f.read() self.test_create_pki_certs() with open(CONF.signing.certfile) as f: cert_file2 = f.read() self.assertEqual(cert_file1, cert_file2) def test_create_ssl_certs_twice_without_rebuild(self): self.test_create_ssl_certs() with open(CONF.eventlet_server_ssl.certfile) as f: cert_file1 = f.read() self.test_create_ssl_certs() with open(CONF.eventlet_server_ssl.certfile) as f: cert_file2 = f.read() self.assertEqual(cert_file1, cert_file2) class TestExecCommand(unit.TestCase): @mock.patch.object(environment.subprocess.Popen, 'poll') def test_running_a_successful_command(self, mock_poll): mock_poll.return_value = 0 ssl = openssl.ConfigureSSL('keystone_user', 'keystone_group') ssl.exec_command(['ls']) @mock.patch.object(environment.subprocess, 'check_output') def test_running_an_invalid_command(self, mock_check_output): cmd = ['ls'] output = 'this is the output string' error = environment.subprocess.CalledProcessError(returncode=1, cmd=cmd, output=output) mock_check_output.side_effect = error ssl = openssl.ConfigureSSL('keystone_user', 'keystone_group') e = self.assertRaises(environment.subprocess.CalledProcessError, ssl.exec_command, cmd) self.assertThat(e.output, matchers.Equals(output)) keystone-9.0.0/keystone/tests/unit/test_sql_migrate_extensions.py0000664000567000056710000000660112701407102026700 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ To run these tests against a live database: 1. Modify the file `keystone/tests/unit/config_files/backend_sql.conf` to use the connection for your live database. 2. Set up a blank, live database. 3. Run the tests using:: tox -e py27 -- keystone.tests.unit.test_sql_migrate_extensions WARNING:: Your database will be wiped. Do not do this against a Database with valuable data as all data will be lost. """ from keystone.contrib import endpoint_filter from keystone.contrib import endpoint_policy from keystone.contrib import federation from keystone.contrib import oauth1 from keystone.contrib import revoke from keystone import exception from keystone.tests.unit import test_sql_upgrade class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase): OAUTH1_MIGRATIONS = 5 def repo_package(self): return oauth1 def test_upgrade(self): for version in range(self.OAUTH1_MIGRATIONS): v = version + 1 self.assertRaises(exception.MigrationMovedFailure, self.upgrade, version=v, repository=self.repo_path) class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase): ENDPOINT_FILTER_MIGRATIONS = 2 def repo_package(self): return endpoint_filter def test_upgrade(self): for version in range(self.ENDPOINT_FILTER_MIGRATIONS): v = version + 1 self.assertRaises(exception.MigrationMovedFailure, self.upgrade, version=v, repository=self.repo_path) class EndpointPolicyExtension(test_sql_upgrade.SqlMigrateBase): ENDPOINT_POLICY_MIGRATIONS = 1 def repo_package(self): return endpoint_policy def test_upgrade(self): self.assertRaises(exception.MigrationMovedFailure, self.upgrade, version=self.ENDPOINT_POLICY_MIGRATIONS, repository=self.repo_path) class FederationExtension(test_sql_upgrade.SqlMigrateBase): FEDERATION_MIGRATIONS = 8 def repo_package(self): return federation def test_upgrade(self): for version in range(self.FEDERATION_MIGRATIONS): v = version + 1 
self.assertRaises(exception.MigrationMovedFailure, self.upgrade, version=v, repository=self.repo_path) class RevokeExtension(test_sql_upgrade.SqlMigrateBase): REVOKE_MIGRATIONS = 2 def repo_package(self): return revoke def test_upgrade(self): for version in range(self.REVOKE_MIGRATIONS): v = version + 1 self.assertRaises(exception.MigrationMovedFailure, self.upgrade, version=v, repository=self.repo_path) keystone-9.0.0/keystone/tests/unit/test_catalog.py0000664000567000056710000003574212701407102023534 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from six.moves import http_client from keystone import catalog from keystone.tests import unit from keystone.tests.unit.ksfixtures import database from keystone.tests.unit import rest BASE_URL = 'http://127.0.0.1:35357/v2' SERVICE_FIXTURE = object() class V2CatalogTestCase(rest.RestfulTestCase): def setUp(self): super(V2CatalogTestCase, self).setUp() self.useFixture(database.Database()) self.service = unit.new_service_ref() self.service_id = self.service['id'] self.catalog_api.create_service(self.service_id, self.service) # TODO(termie): add an admin user to the fixtures and use that user # override the fixtures, for now self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_bar['id'], self.role_admin['id']) def config_overrides(self): super(V2CatalogTestCase, self).config_overrides() self.config_fixture.config(group='catalog', driver='sql') def _get_token_id(self, r): """Applicable only to JSON.""" return r.result['access']['token']['id'] def _endpoint_create(self, expected_status=http_client.OK, service_id=SERVICE_FIXTURE, publicurl='http://localhost:8080', internalurl='http://localhost:8080', adminurl='http://localhost:8080'): if service_id is SERVICE_FIXTURE: service_id = self.service_id path = '/v2.0/endpoints' body = { 'endpoint': { 'adminurl': adminurl, 'service_id': service_id, 'region': 'RegionOne', 'internalurl': internalurl, 'publicurl': publicurl } } r = self.admin_request(method='POST', token=self.get_scoped_token(), path=path, expected_status=expected_status, body=body) return body, r def _region_create(self): region = unit.new_region_ref() region_id = region['id'] self.catalog_api.create_region(region) return region_id def test_endpoint_create(self): req_body, response = self._endpoint_create() self.assertIn('endpoint', response.result) self.assertIn('id', response.result['endpoint']) for field, value in req_body['endpoint'].items(): self.assertEqual(value, response.result['endpoint'][field]) def 
test_pure_v3_endpoint_with_publicurl_visible_from_v2(self): """Test pure v3 endpoint can be fetched via v2.0 API. For those who are using v2.0 APIs, endpoints created by v3 API should also be visible as there are no differences about the endpoints except the format or the internal implementation. Since publicURL is required for v2.0 API, so only v3 endpoints of the service which have the public interface endpoint will be converted into v2.0 endpoints. """ region_id = self._region_create() # create v3 endpoints with three interfaces body = { 'endpoint': unit.new_endpoint_ref(self.service_id, region_id=region_id) } for interface in catalog.controllers.INTERFACES: body['endpoint']['interface'] = interface self.admin_request(method='POST', token=self.get_scoped_token(), path='/v3/endpoints', expected_status=http_client.CREATED, body=body) r = self.admin_request(token=self.get_scoped_token(), path='/v2.0/endpoints') # Endpoints of the service which have a public interface endpoint # will be returned via v2.0 API self.assertEqual(1, len(r.result['endpoints'])) v2_endpoint = r.result['endpoints'][0] self.assertEqual(self.service_id, v2_endpoint['service_id']) # This is not the focus of this test, so no different urls are used. self.assertEqual(body['endpoint']['url'], v2_endpoint['publicurl']) self.assertEqual(body['endpoint']['url'], v2_endpoint['adminurl']) self.assertEqual(body['endpoint']['url'], v2_endpoint['internalurl']) self.assertNotIn('name', v2_endpoint) v3_endpoint = self.catalog_api.get_endpoint(v2_endpoint['id']) # Checks the v3 public endpoint's id is the generated v2.0 endpoint self.assertEqual('public', v3_endpoint['interface']) self.assertEqual(self.service_id, v3_endpoint['service_id']) def test_pure_v3_endpoint_without_publicurl_invisible_from_v2(self): """Test that the v2.0 API can't fetch v3 endpoints without publicURLs. 
v2.0 API will return endpoints created by v3 API, but publicURL is required for the service in the v2.0 API, therefore v3 endpoints of a service which don't have publicURL will be ignored. """ region_id = self._region_create() # create a v3 endpoint without public interface body = { 'endpoint': unit.new_endpoint_ref(self.service_id, region_id=region_id) } for interface in catalog.controllers.INTERFACES: if interface == 'public': continue body['endpoint']['interface'] = interface self.admin_request(method='POST', token=self.get_scoped_token(), path='/v3/endpoints', expected_status=http_client.CREATED, body=body) r = self.admin_request(token=self.get_scoped_token(), path='/v2.0/endpoints') # v3 endpoints of a service which don't have publicURL can't be # fetched via v2.0 API self.assertEqual(0, len(r.result['endpoints'])) def test_endpoint_create_with_null_adminurl(self): req_body, response = self._endpoint_create(adminurl=None) self.assertIsNone(req_body['endpoint']['adminurl']) self.assertNotIn('adminurl', response.result['endpoint']) def test_endpoint_create_with_empty_adminurl(self): req_body, response = self._endpoint_create(adminurl='') self.assertEqual('', req_body['endpoint']['adminurl']) self.assertNotIn("adminurl", response.result['endpoint']) def test_endpoint_create_with_null_internalurl(self): req_body, response = self._endpoint_create(internalurl=None) self.assertIsNone(req_body['endpoint']['internalurl']) self.assertNotIn('internalurl', response.result['endpoint']) def test_endpoint_create_with_empty_internalurl(self): req_body, response = self._endpoint_create(internalurl='') self.assertEqual('', req_body['endpoint']['internalurl']) self.assertNotIn("internalurl", response.result['endpoint']) def test_endpoint_create_with_null_publicurl(self): self._endpoint_create(expected_status=http_client.BAD_REQUEST, publicurl=None) def test_endpoint_create_with_empty_publicurl(self): self._endpoint_create(expected_status=http_client.BAD_REQUEST, publicurl='') 
def test_endpoint_create_with_null_service_id(self): self._endpoint_create(expected_status=http_client.BAD_REQUEST, service_id=None) def test_endpoint_create_with_empty_service_id(self): self._endpoint_create(expected_status=http_client.BAD_REQUEST, service_id='') def test_endpoint_create_with_valid_url(self): """Create endpoint with valid URL should be tested, too.""" # list one valid url is enough, no need to list too much valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s' # baseline tests that all valid URLs works self._endpoint_create(expected_status=http_client.OK, publicurl=valid_url, internalurl=valid_url, adminurl=valid_url) def test_endpoint_create_with_invalid_url(self): """Test the invalid cases: substitutions is not exactly right.""" invalid_urls = [ # using a substitution that is not whitelisted - KeyError 'http://127.0.0.1:8774/v1.1/$(nonexistent)s', # invalid formatting - ValueError 'http://127.0.0.1:8774/v1.1/$(tenant_id)', 'http://127.0.0.1:8774/v1.1/$(tenant_id)t', 'http://127.0.0.1:8774/v1.1/$(tenant_id', # invalid type specifier - TypeError # admin_url is a string not an int 'http://127.0.0.1:8774/v1.1/$(admin_url)d', ] # list one valid url is enough, no need to list too much valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s' # Case one: publicurl, internalurl and adminurl are # all invalid for invalid_url in invalid_urls: self._endpoint_create(expected_status=http_client.BAD_REQUEST, publicurl=invalid_url, internalurl=invalid_url, adminurl=invalid_url) # Case two: publicurl, internalurl are invalid # and adminurl is valid for invalid_url in invalid_urls: self._endpoint_create(expected_status=http_client.BAD_REQUEST, publicurl=invalid_url, internalurl=invalid_url, adminurl=valid_url) # Case three: publicurl, adminurl are invalid # and internalurl is valid for invalid_url in invalid_urls: self._endpoint_create(expected_status=http_client.BAD_REQUEST, publicurl=invalid_url, internalurl=valid_url, adminurl=invalid_url) # Case four: 
internalurl, adminurl are invalid # and publicurl is valid for invalid_url in invalid_urls: self._endpoint_create(expected_status=http_client.BAD_REQUEST, publicurl=valid_url, internalurl=invalid_url, adminurl=invalid_url) # Case five: publicurl is invalid, internalurl # and adminurl are valid for invalid_url in invalid_urls: self._endpoint_create(expected_status=http_client.BAD_REQUEST, publicurl=invalid_url, internalurl=valid_url, adminurl=valid_url) # Case six: internalurl is invalid, publicurl # and adminurl are valid for invalid_url in invalid_urls: self._endpoint_create(expected_status=http_client.BAD_REQUEST, publicurl=valid_url, internalurl=invalid_url, adminurl=valid_url) # Case seven: adminurl is invalid, publicurl # and internalurl are valid for invalid_url in invalid_urls: self._endpoint_create(expected_status=http_client.BAD_REQUEST, publicurl=valid_url, internalurl=valid_url, adminurl=invalid_url) class TestV2CatalogAPISQL(unit.TestCase): def setUp(self): super(TestV2CatalogAPISQL, self).setUp() self.useFixture(database.Database()) self.catalog_api = catalog.Manager() service = unit.new_service_ref() self.service_id = service['id'] self.catalog_api.create_service(self.service_id, service) self.create_endpoint(service_id=self.service_id) def create_endpoint(self, service_id, **kwargs): endpoint = unit.new_endpoint_ref(service_id=service_id, region_id=None, **kwargs) self.catalog_api.create_endpoint(endpoint['id'], endpoint) return endpoint def config_overrides(self): super(TestV2CatalogAPISQL, self).config_overrides() self.config_fixture.config(group='catalog', driver='sql') def test_get_catalog_ignores_endpoints_with_invalid_urls(self): user_id = uuid.uuid4().hex tenant_id = uuid.uuid4().hex # the only endpoint in the catalog is the one created in setUp catalog = self.catalog_api.get_catalog(user_id, tenant_id) self.assertEqual(1, len(catalog)) # it's also the only endpoint in the backend self.assertEqual(1, len(self.catalog_api.list_endpoints())) # 
create a new, invalid endpoint - malformed type declaration self.create_endpoint(self.service_id, url='http://keystone/%(tenant_id)') # create a new, invalid endpoint - nonexistent key self.create_endpoint(self.service_id, url='http://keystone/%(you_wont_find_me)s') # verify that the invalid endpoints don't appear in the catalog catalog = self.catalog_api.get_catalog(user_id, tenant_id) self.assertEqual(1, len(catalog)) # all three endpoints appear in the backend self.assertEqual(3, len(self.catalog_api.list_endpoints())) def test_get_catalog_always_returns_service_name(self): user_id = uuid.uuid4().hex tenant_id = uuid.uuid4().hex # new_service_ref() returns a ref with a `name`. named_svc = unit.new_service_ref() self.catalog_api.create_service(named_svc['id'], named_svc) self.create_endpoint(service_id=named_svc['id']) # This time manually delete the generated `name`. unnamed_svc = unit.new_service_ref() del unnamed_svc['name'] self.catalog_api.create_service(unnamed_svc['id'], unnamed_svc) self.create_endpoint(service_id=unnamed_svc['id']) region = None catalog = self.catalog_api.get_catalog(user_id, tenant_id) self.assertEqual(named_svc['name'], catalog[region][named_svc['type']]['name']) # verify a name is not generated when the service is passed to the API self.assertEqual('', catalog[region][unnamed_svc['type']]['name']) keystone-9.0.0/keystone/tests/unit/test_kvs.py0000664000567000056710000006054012701407105022722 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import time import uuid from dogpile.cache import api from dogpile.cache import proxy import mock import six from testtools import matchers from keystone.common.kvs.backends import inmemdb from keystone.common.kvs.backends import memcached from keystone.common.kvs import core from keystone import exception from keystone.tests import unit NO_VALUE = api.NO_VALUE class MutexFixture(object): def __init__(self, storage_dict, key, timeout): self.database = storage_dict self.key = '_lock' + key def acquire(self, wait=True): while True: try: self.database[self.key] = 1 return True except KeyError: return False def release(self): self.database.pop(self.key, None) class KVSBackendFixture(inmemdb.MemoryBackend): def __init__(self, arguments): class InmemTestDB(dict): def __setitem__(self, key, value): if key in self: raise KeyError('Key %s already exists' % key) super(InmemTestDB, self).__setitem__(key, value) self._db = InmemTestDB() self.lock_timeout = arguments.pop('lock_timeout', 5) self.test_arg = arguments.pop('test_arg', None) def get_mutex(self, key): return MutexFixture(self._db, key, self.lock_timeout) @classmethod def key_mangler(cls, key): return 'KVSBackend_' + key class KVSBackendForcedKeyMangleFixture(KVSBackendFixture): use_backend_key_mangler = True @classmethod def key_mangler(cls, key): return 'KVSBackendForcedKeyMangle_' + key class RegionProxyFixture(proxy.ProxyBackend): """A test dogpile.cache proxy that does nothing.""" class RegionProxy2Fixture(proxy.ProxyBackend): """A test dogpile.cache proxy that does nothing.""" class TestMemcacheDriver(api.CacheBackend): """A test dogpile.cache backend. This test backend conforms to the mixin-mechanism for overriding set and set_multi methods on dogpile memcached drivers. 
""" class test_client(object): # FIXME(morganfainberg): Convert this test client over to using mock # and/or mock.MagicMock as appropriate def __init__(self): self.__name__ = 'TestingMemcacheDriverClientObject' self.set_arguments_passed = None self.keys_values = {} self.lock_set_time = None self.lock_expiry = None def set(self, key, value, **set_arguments): self.keys_values.clear() self.keys_values[key] = value self.set_arguments_passed = set_arguments def set_multi(self, mapping, **set_arguments): self.keys_values.clear() self.keys_values = mapping self.set_arguments_passed = set_arguments def add(self, key, value, expiry_time): # NOTE(morganfainberg): `add` is used in this case for the # memcache lock testing. If further testing is required around the # actual memcache `add` interface, this method should be # expanded to work more like the actual memcache `add` function if self.lock_expiry is not None and self.lock_set_time is not None: if time.time() - self.lock_set_time < self.lock_expiry: return False self.lock_expiry = expiry_time self.lock_set_time = time.time() return True def delete(self, key): # NOTE(morganfainberg): `delete` is used in this case for the # memcache lock testing. If further testing is required around the # actual memcache `delete` interface, this method should be # expanded to work more like the actual memcache `delete` function. self.lock_expiry = None self.lock_set_time = None return True def __init__(self, arguments): self.client = self.test_client() self.set_arguments = {} # NOTE(morganfainberg): This is the same logic as the dogpile backend # since we need to mirror that functionality for the `set_argument` # values to appear on the actual backend. 
if 'memcached_expire_time' in arguments: self.set_arguments['time'] = arguments['memcached_expire_time'] def set(self, key, value): self.client.set(key, value, **self.set_arguments) def set_multi(self, mapping): self.client.set_multi(mapping, **self.set_arguments) class KVSTest(unit.TestCase): def setUp(self): super(KVSTest, self).setUp() self.key_foo = 'foo_' + uuid.uuid4().hex self.value_foo = uuid.uuid4().hex self.key_bar = 'bar_' + uuid.uuid4().hex self.value_bar = {'complex_data_structure': uuid.uuid4().hex} self.addCleanup(memcached.VALID_DOGPILE_BACKENDS.pop, 'TestDriver', None) memcached.VALID_DOGPILE_BACKENDS['TestDriver'] = TestMemcacheDriver def _get_kvs_region(self, name=None): if name is None: name = uuid.uuid4().hex return core.get_key_value_store(name) def test_kvs_basic_configuration(self): # Test that the most basic configuration options pass through to the # backend. region_one = uuid.uuid4().hex region_two = uuid.uuid4().hex test_arg = 100 kvs = self._get_kvs_region(region_one) kvs.configure('openstack.kvs.Memory') self.assertIsInstance(kvs._region.backend, inmemdb.MemoryBackend) self.assertEqual(region_one, kvs._region.name) kvs = self._get_kvs_region(region_two) kvs.configure('openstack.kvs.KVSBackendFixture', test_arg=test_arg) self.assertEqual(region_two, kvs._region.name) self.assertEqual(test_arg, kvs._region.backend.test_arg) def test_kvs_proxy_configuration(self): # Test that proxies are applied correctly and in the correct (reverse) # order to the kvs region. 
kvs = self._get_kvs_region() kvs.configure( 'openstack.kvs.Memory', proxy_list=['keystone.tests.unit.test_kvs.RegionProxyFixture', 'keystone.tests.unit.test_kvs.RegionProxy2Fixture']) self.assertIsInstance(kvs._region.backend, RegionProxyFixture) self.assertIsInstance(kvs._region.backend.proxied, RegionProxy2Fixture) self.assertIsInstance(kvs._region.backend.proxied.proxied, inmemdb.MemoryBackend) def test_kvs_key_mangler_fallthrough_default(self): # Test to make sure we default to the standard dogpile sha1 hashing # key_mangler kvs = self._get_kvs_region() kvs.configure('openstack.kvs.Memory') self.assertIs(kvs._region.key_mangler, core.sha1_mangle_key) # The backend should also have the keymangler set the same as the # region now. self.assertIs(kvs._region.backend.key_mangler, core.sha1_mangle_key) def test_kvs_key_mangler_configuration_backend(self): kvs = self._get_kvs_region() kvs.configure('openstack.kvs.KVSBackendFixture') expected = KVSBackendFixture.key_mangler(self.key_foo) self.assertEqual(expected, kvs._region.key_mangler(self.key_foo)) def test_kvs_key_mangler_configuration_forced_backend(self): kvs = self._get_kvs_region() kvs.configure('openstack.kvs.KVSBackendForcedKeyMangleFixture', key_mangler=core.sha1_mangle_key) expected = KVSBackendForcedKeyMangleFixture.key_mangler(self.key_foo) self.assertEqual(expected, kvs._region.key_mangler(self.key_foo)) def test_kvs_key_mangler_configuration_disabled(self): # Test that no key_mangler is set if enable_key_mangler is false self.config_fixture.config(group='kvs', enable_key_mangler=False) kvs = self._get_kvs_region() kvs.configure('openstack.kvs.Memory') self.assertIsNone(kvs._region.key_mangler) self.assertIsNone(kvs._region.backend.key_mangler) def test_kvs_key_mangler_set_on_backend(self): def test_key_mangler(key): return key kvs = self._get_kvs_region() kvs.configure('openstack.kvs.Memory') self.assertIs(kvs._region.backend.key_mangler, core.sha1_mangle_key) kvs._set_key_mangler(test_key_mangler) 
self.assertIs(kvs._region.backend.key_mangler, test_key_mangler) def test_kvs_basic_get_set_delete(self): # Test the basic get/set/delete actions on the KVS region kvs = self._get_kvs_region() kvs.configure('openstack.kvs.Memory') # Not found should be raised if the key doesn't exist self.assertRaises(exception.NotFound, kvs.get, key=self.key_bar) kvs.set(self.key_bar, self.value_bar) returned_value = kvs.get(self.key_bar) # The returned value should be the same value as the value in .set self.assertEqual(self.value_bar, returned_value) # The value should not be the exact object used in .set self.assertIsNot(returned_value, self.value_bar) kvs.delete(self.key_bar) # Second delete should raise NotFound self.assertRaises(exception.NotFound, kvs.delete, key=self.key_bar) def _kvs_multi_get_set_delete(self, kvs): keys = [self.key_foo, self.key_bar] expected = [self.value_foo, self.value_bar] kvs.set_multi({self.key_foo: self.value_foo, self.key_bar: self.value_bar}) # Returned value from get_multi should be a list of the values of the # keys self.assertEqual(expected, kvs.get_multi(keys)) # Delete both keys kvs.delete_multi(keys) # make sure that NotFound is properly raised when trying to get the now # deleted keys self.assertRaises(exception.NotFound, kvs.get_multi, keys=keys) self.assertRaises(exception.NotFound, kvs.get, key=self.key_foo) self.assertRaises(exception.NotFound, kvs.get, key=self.key_bar) # Make sure get_multi raises NotFound if one of the keys isn't found kvs.set(self.key_foo, self.value_foo) self.assertRaises(exception.NotFound, kvs.get_multi, keys=keys) def test_kvs_multi_get_set_delete(self): kvs = self._get_kvs_region() kvs.configure('openstack.kvs.Memory') self._kvs_multi_get_set_delete(kvs) def test_kvs_locking_context_handler(self): # Make sure we're creating the correct key/value pairs for the backend # distributed locking mutex. 
self.config_fixture.config(group='kvs', enable_key_mangler=False) kvs = self._get_kvs_region() kvs.configure('openstack.kvs.KVSBackendFixture') lock_key = '_lock' + self.key_foo self.assertNotIn(lock_key, kvs._region.backend._db) with core.KeyValueStoreLock(kvs._mutex(self.key_foo), self.key_foo): self.assertIn(lock_key, kvs._region.backend._db) self.assertIs(kvs._region.backend._db[lock_key], 1) self.assertNotIn(lock_key, kvs._region.backend._db) def test_kvs_locking_context_handler_locking_disabled(self): # Make sure no creation of key/value pairs for the backend # distributed locking mutex occurs if locking is disabled. self.config_fixture.config(group='kvs', enable_key_mangler=False) kvs = self._get_kvs_region() kvs.configure('openstack.kvs.KVSBackendFixture', locking=False) lock_key = '_lock' + self.key_foo self.assertNotIn(lock_key, kvs._region.backend._db) with core.KeyValueStoreLock(kvs._mutex(self.key_foo), self.key_foo, False): self.assertNotIn(lock_key, kvs._region.backend._db) self.assertNotIn(lock_key, kvs._region.backend._db) def test_kvs_with_lock_action_context_manager_timeout(self): kvs = self._get_kvs_region() lock_timeout = 5 kvs.configure('openstack.kvs.Memory', lock_timeout=lock_timeout) def do_with_lock_action_timeout(kvs_region, key, offset): with kvs_region.get_lock(key) as lock_in_use: self.assertTrue(lock_in_use.active) # Subtract the offset from the acquire_time. If this puts the # acquire_time difference from time.time() at >= lock_timeout # this should raise a LockTimeout exception. This is because # there is a built-in 1-second overlap where the context # manager thinks the lock is expired but the lock is still # active. This is to help mitigate race conditions on the # time-check itself. lock_in_use.acquire_time -= offset with kvs_region._action_with_lock(key, lock_in_use): pass # This should succeed, we are not timed-out here. 
do_with_lock_action_timeout(kvs, key=uuid.uuid4().hex, offset=2) # Try it now with an offset equal to the lock_timeout self.assertRaises(core.LockTimeout, do_with_lock_action_timeout, kvs_region=kvs, key=uuid.uuid4().hex, offset=lock_timeout) # Final test with offset significantly greater than the lock_timeout self.assertRaises(core.LockTimeout, do_with_lock_action_timeout, kvs_region=kvs, key=uuid.uuid4().hex, offset=100) def test_kvs_with_lock_action_mismatched_keys(self): kvs = self._get_kvs_region() kvs.configure('openstack.kvs.Memory') def do_with_lock_action(kvs_region, lock_key, target_key): with kvs_region.get_lock(lock_key) as lock_in_use: self.assertTrue(lock_in_use.active) with kvs_region._action_with_lock(target_key, lock_in_use): pass # Ensure we raise a ValueError if the lock key mismatches from the # target key. self.assertRaises(ValueError, do_with_lock_action, kvs_region=kvs, lock_key=self.key_foo, target_key=self.key_bar) def test_kvs_with_lock_action_context_manager(self): # Make sure we're creating the correct key/value pairs for the backend # distributed locking mutex. 
self.config_fixture.config(group='kvs', enable_key_mangler=False) kvs = self._get_kvs_region() kvs.configure('openstack.kvs.KVSBackendFixture') lock_key = '_lock' + self.key_foo self.assertNotIn(lock_key, kvs._region.backend._db) with kvs.get_lock(self.key_foo) as lock: with kvs._action_with_lock(self.key_foo, lock): self.assertTrue(lock.active) self.assertIn(lock_key, kvs._region.backend._db) self.assertIs(kvs._region.backend._db[lock_key], 1) self.assertNotIn(lock_key, kvs._region.backend._db) def test_kvs_with_lock_action_context_manager_no_lock(self): # Make sure we're not locking unless an actual lock is passed into the # context manager self.config_fixture.config(group='kvs', enable_key_mangler=False) kvs = self._get_kvs_region() kvs.configure('openstack.kvs.KVSBackendFixture') lock_key = '_lock' + self.key_foo lock = None self.assertNotIn(lock_key, kvs._region.backend._db) with kvs._action_with_lock(self.key_foo, lock): self.assertNotIn(lock_key, kvs._region.backend._db) self.assertNotIn(lock_key, kvs._region.backend._db) def test_kvs_backend_registration_does_not_reregister_backends(self): # SetUp registers the test backends. Running this again would raise an # exception if re-registration of the backends occurred. 
kvs = self._get_kvs_region() kvs.configure('openstack.kvs.Memory') core._register_backends() def test_kvs_memcached_manager_valid_dogpile_memcached_backend(self): kvs = self._get_kvs_region() kvs.configure('openstack.kvs.Memcached', memcached_backend='TestDriver') self.assertIsInstance(kvs._region.backend.driver, TestMemcacheDriver) def test_kvs_memcached_manager_invalid_dogpile_memcached_backend(self): # Invalid dogpile memcache backend should raise ValueError kvs = self._get_kvs_region() self.assertRaises(ValueError, kvs.configure, backing_store='openstack.kvs.Memcached', memcached_backend=uuid.uuid4().hex) def test_kvs_memcache_manager_no_expiry_keys(self): # Make sure the memcache backend recalculates the no-expiry keys # correctly when a key-mangler is set on it. def new_mangler(key): return '_mangled_key_' + key kvs = self._get_kvs_region() no_expiry_keys = set(['test_key']) kvs.configure('openstack.kvs.Memcached', memcached_backend='TestDriver', no_expiry_keys=no_expiry_keys) calculated_keys = set([kvs._region.key_mangler(key) for key in no_expiry_keys]) self.assertIs(kvs._region.backend.key_mangler, core.sha1_mangle_key) self.assertSetEqual(calculated_keys, kvs._region.backend.no_expiry_hashed_keys) self.assertSetEqual(no_expiry_keys, kvs._region.backend.raw_no_expiry_keys) calculated_keys = set([new_mangler(key) for key in no_expiry_keys]) kvs._region.backend.key_mangler = new_mangler self.assertSetEqual(calculated_keys, kvs._region.backend.no_expiry_hashed_keys) self.assertSetEqual(no_expiry_keys, kvs._region.backend.raw_no_expiry_keys) def test_kvs_memcache_key_mangler_set_to_none(self): kvs = self._get_kvs_region() no_expiry_keys = set(['test_key']) kvs.configure('openstack.kvs.Memcached', memcached_backend='TestDriver', no_expiry_keys=no_expiry_keys) self.assertIs(kvs._region.backend.key_mangler, core.sha1_mangle_key) kvs._region.backend.key_mangler = None self.assertSetEqual(kvs._region.backend.raw_no_expiry_keys, 
kvs._region.backend.no_expiry_hashed_keys) self.assertIsNone(kvs._region.backend.key_mangler) def test_noncallable_key_mangler_set_on_driver_raises_type_error(self): kvs = self._get_kvs_region() kvs.configure('openstack.kvs.Memcached', memcached_backend='TestDriver') self.assertRaises(TypeError, setattr, kvs._region.backend, 'key_mangler', 'Non-Callable') def test_kvs_memcache_set_arguments_and_memcache_expires_ttl(self): # Test the "set_arguments" (arguments passed on all set calls) logic # and the no-expiry-key modifications of set_arguments for the explicit # memcache TTL. self.config_fixture.config(group='kvs', enable_key_mangler=False) kvs = self._get_kvs_region() memcache_expire_time = 86400 expected_set_args = {'time': memcache_expire_time} expected_no_expiry_args = {} expected_foo_keys = [self.key_foo] expected_bar_keys = [self.key_bar] mapping_foo = {self.key_foo: self.value_foo} mapping_bar = {self.key_bar: self.value_bar} kvs.configure(backing_store='openstack.kvs.Memcached', memcached_backend='TestDriver', memcached_expire_time=memcache_expire_time, some_other_arg=uuid.uuid4().hex, no_expiry_keys=[self.key_bar]) kvs_driver = kvs._region.backend.driver # Ensure the set_arguments are correct self.assertDictEqual( expected_set_args, kvs._region.backend._get_set_arguments_driver_attr()) # Set a key that would have an expiry and verify the correct result # occurred and that the correct set_arguments were passed. kvs.set(self.key_foo, self.value_foo) self.assertDictEqual( expected_set_args, kvs._region.backend.driver.client.set_arguments_passed) observed_foo_keys = list(kvs_driver.client.keys_values.keys()) self.assertEqual(expected_foo_keys, observed_foo_keys) self.assertEqual( self.value_foo, kvs._region.backend.driver.client.keys_values[self.key_foo][0]) # Set a key that would not have an expiry and verify the correct result # occurred and that the correct set_arguments were passed. 
kvs.set(self.key_bar, self.value_bar) self.assertDictEqual( expected_no_expiry_args, kvs._region.backend.driver.client.set_arguments_passed) observed_bar_keys = list(kvs_driver.client.keys_values.keys()) self.assertEqual(expected_bar_keys, observed_bar_keys) self.assertEqual( self.value_bar, kvs._region.backend.driver.client.keys_values[self.key_bar][0]) # set_multi a dict that would have an expiry and verify the correct # result occurred and that the correct set_arguments were passed. kvs.set_multi(mapping_foo) self.assertDictEqual( expected_set_args, kvs._region.backend.driver.client.set_arguments_passed) observed_foo_keys = list(kvs_driver.client.keys_values.keys()) self.assertEqual(expected_foo_keys, observed_foo_keys) self.assertEqual( self.value_foo, kvs._region.backend.driver.client.keys_values[self.key_foo][0]) # set_multi a dict that would not have an expiry and verify the correct # result occurred and that the correct set_arguments were passed. kvs.set_multi(mapping_bar) self.assertDictEqual( expected_no_expiry_args, kvs._region.backend.driver.client.set_arguments_passed) observed_bar_keys = list(kvs_driver.client.keys_values.keys()) self.assertEqual(expected_bar_keys, observed_bar_keys) self.assertEqual( self.value_bar, kvs._region.backend.driver.client.keys_values[self.key_bar][0]) def test_memcached_lock_max_lock_attempts(self): kvs = self._get_kvs_region() max_lock_attempts = 1 test_key = uuid.uuid4().hex kvs.configure(backing_store='openstack.kvs.Memcached', memcached_backend='TestDriver', max_lock_attempts=max_lock_attempts) self.assertEqual(max_lock_attempts, kvs._region.backend.max_lock_attempts) # Simple Lock success test with kvs.get_lock(test_key) as lock: kvs.set(test_key, 'testing', lock) def lock_within_a_lock(key): with kvs.get_lock(key) as first_lock: kvs.set(test_key, 'lock', first_lock) with kvs.get_lock(key) as second_lock: kvs.set(key, 'lock-within-a-lock', second_lock) self.assertRaises(exception.UnexpectedError, lock_within_a_lock, 
key=test_key) class TestMemcachedBackend(unit.TestCase): @mock.patch('keystone.common.kvs.backends.memcached._', six.text_type) def test_invalid_backend_fails_initialization(self): raises_valueerror = matchers.Raises(matchers.MatchesException( ValueError, r'.*FakeBackend.*')) options = { 'url': 'needed to get to the focus of this test (the backend)', 'memcached_backend': 'FakeBackend', } self.assertThat(lambda: memcached.MemcachedBackend(options), raises_valueerror) keystone-9.0.0/keystone/tests/unit/test_v3_domain_config.py0000664000567000056710000005241712701407102025324 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import uuid from oslo_config import cfg from six.moves import http_client from keystone import exception from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = cfg.CONF class DomainConfigTestCase(test_v3.RestfulTestCase): """Test domain config support.""" def setUp(self): super(DomainConfigTestCase, self).setUp() self.domain = unit.new_domain_ref() self.resource_api.create_domain(self.domain['id'], self.domain) self.config = {'ldap': {'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} def test_create_config(self): """Call ``PUT /domains/{domain_id}/config``.""" url = '/domains/%(domain_id)s/config' % { 'domain_id': self.domain['id']} r = self.put(url, body={'config': self.config}, expected_status=http_client.CREATED) res = self.domain_config_api.get_config(self.domain['id']) self.assertEqual(self.config, r.result['config']) self.assertEqual(self.config, res) def test_create_config_invalid_domain(self): """Call ``PUT /domains/{domain_id}/config`` While creating Identity API-based domain config with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. 
""" invalid_domain_id = uuid.uuid4().hex url = '/domains/%(domain_id)s/config' % { 'domain_id': invalid_domain_id} self.put(url, body={'config': self.config}, expected_status=exception.DomainNotFound.code) def test_create_config_twice(self): """Check multiple creates don't throw error""" self.put('/domains/%(domain_id)s/config' % { 'domain_id': self.domain['id']}, body={'config': self.config}, expected_status=http_client.CREATED) self.put('/domains/%(domain_id)s/config' % { 'domain_id': self.domain['id']}, body={'config': self.config}, expected_status=http_client.OK) def test_delete_config(self): """Call ``DELETE /domains{domain_id}/config``.""" self.domain_config_api.create_config(self.domain['id'], self.config) self.delete('/domains/%(domain_id)s/config' % { 'domain_id': self.domain['id']}) self.get('/domains/%(domain_id)s/config' % { 'domain_id': self.domain['id']}, expected_status=exception.DomainConfigNotFound.code) def test_delete_config_invalid_domain(self): """Call ``DELETE /domains{domain_id}/config`` While deleting Identity API-based domain config with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. 
""" self.domain_config_api.create_config(self.domain['id'], self.config) invalid_domain_id = uuid.uuid4().hex self.delete('/domains/%(domain_id)s/config' % { 'domain_id': invalid_domain_id}, expected_status=exception.DomainNotFound.code) def test_delete_config_by_group(self): """Call ``DELETE /domains{domain_id}/config/{group}``.""" self.domain_config_api.create_config(self.domain['id'], self.config) self.delete('/domains/%(domain_id)s/config/ldap' % { 'domain_id': self.domain['id']}) res = self.domain_config_api.get_config(self.domain['id']) self.assertNotIn('ldap', res) def test_delete_config_by_group_invalid_domain(self): """Call ``DELETE /domains{domain_id}/config/{group}`` While deleting Identity API-based domain config by group with an invalid domain id provided, the request shall be rejected with a response 404 domain not found. """ self.domain_config_api.create_config(self.domain['id'], self.config) invalid_domain_id = uuid.uuid4().hex self.delete('/domains/%(domain_id)s/config/ldap' % { 'domain_id': invalid_domain_id}, expected_status=exception.DomainNotFound.code) def test_get_head_config(self): """Call ``GET & HEAD for /domains{domain_id}/config``.""" self.domain_config_api.create_config(self.domain['id'], self.config) url = '/domains/%(domain_id)s/config' % { 'domain_id': self.domain['id']} r = self.get(url) self.assertEqual(self.config, r.result['config']) self.head(url, expected_status=http_client.OK) def test_get_config_by_group(self): """Call ``GET & HEAD /domains{domain_id}/config/{group}``.""" self.domain_config_api.create_config(self.domain['id'], self.config) url = '/domains/%(domain_id)s/config/ldap' % { 'domain_id': self.domain['id']} r = self.get(url) self.assertEqual({'ldap': self.config['ldap']}, r.result['config']) self.head(url, expected_status=http_client.OK) def test_get_config_by_group_invalid_domain(self): """Call ``GET & HEAD /domains{domain_id}/config/{group}`` While retrieving Identity API-based domain config by group with an 
invalid domain id provided, the request shall be rejected with a response 404 domain not found. """ self.domain_config_api.create_config(self.domain['id'], self.config) invalid_domain_id = uuid.uuid4().hex self.get('/domains/%(domain_id)s/config/ldap' % { 'domain_id': invalid_domain_id}, expected_status=exception.DomainNotFound.code) def test_get_config_by_option(self): """Call ``GET & HEAD /domains{domain_id}/config/{group}/{option}``.""" self.domain_config_api.create_config(self.domain['id'], self.config) url = '/domains/%(domain_id)s/config/ldap/url' % { 'domain_id': self.domain['id']} r = self.get(url) self.assertEqual({'url': self.config['ldap']['url']}, r.result['config']) self.head(url, expected_status=http_client.OK) def test_get_config_by_option_invalid_domain(self): """Call ``GET & HEAD /domains{domain_id}/config/{group}/{option}`` While retrieving Identity API-based domain config by option with an invalid domain id provided, the request shall be rejected with a response 404 domain not found. """ self.domain_config_api.create_config(self.domain['id'], self.config) invalid_domain_id = uuid.uuid4().hex self.get('/domains/%(domain_id)s/config/ldap/url' % { 'domain_id': invalid_domain_id}, expected_status=exception.DomainNotFound.code) def test_get_non_existant_config(self): """Call ``GET /domains{domain_id}/config when no config defined``.""" self.get('/domains/%(domain_id)s/config' % { 'domain_id': self.domain['id']}, expected_status=http_client.NOT_FOUND) def test_get_non_existant_config_invalid_domain(self): """Call ``GET /domains{domain_id}/config when no config defined`` While retrieving non-existent Identity API-based domain config with an invalid domain id provided, the request shall be rejected with a response 404 domain not found. 
""" invalid_domain_id = uuid.uuid4().hex self.get('/domains/%(domain_id)s/config' % { 'domain_id': invalid_domain_id}, expected_status=exception.DomainNotFound.code) def test_get_non_existant_config_group(self): """Call ``GET /domains{domain_id}/config/{group_not_exist}``.""" config = {'ldap': {'url': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) self.get('/domains/%(domain_id)s/config/identity' % { 'domain_id': self.domain['id']}, expected_status=http_client.NOT_FOUND) def test_get_non_existant_config_group_invalid_domain(self): """Call ``GET /domains{domain_id}/config/{group_not_exist}`` While retrieving non-existent Identity API-based domain config group with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. """ config = {'ldap': {'url': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) invalid_domain_id = uuid.uuid4().hex self.get('/domains/%(domain_id)s/config/identity' % { 'domain_id': invalid_domain_id}, expected_status=exception.DomainNotFound.code) def test_get_non_existant_config_option(self): """Call ``GET /domains{domain_id}/config/group/{option_not_exist}``.""" config = {'ldap': {'url': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) self.get('/domains/%(domain_id)s/config/ldap/user_tree_dn' % { 'domain_id': self.domain['id']}, expected_status=http_client.NOT_FOUND) def test_get_non_existant_config_option_invalid_domain(self): """Call ``GET /domains{domain_id}/config/group/{option_not_exist}`` While retrieving non-existent Identity API-based domain config option with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. 
""" config = {'ldap': {'url': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) invalid_domain_id = uuid.uuid4().hex self.get('/domains/%(domain_id)s/config/ldap/user_tree_dn' % { 'domain_id': invalid_domain_id}, expected_status=exception.DomainNotFound.code) def test_update_config(self): """Call ``PATCH /domains/{domain_id}/config``.""" self.domain_config_api.create_config(self.domain['id'], self.config) new_config = {'ldap': {'url': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} r = self.patch('/domains/%(domain_id)s/config' % { 'domain_id': self.domain['id']}, body={'config': new_config}) res = self.domain_config_api.get_config(self.domain['id']) expected_config = copy.deepcopy(self.config) expected_config['ldap']['url'] = new_config['ldap']['url'] expected_config['identity']['driver'] = ( new_config['identity']['driver']) self.assertEqual(expected_config, r.result['config']) self.assertEqual(expected_config, res) def test_update_config_invalid_domain(self): """Call ``PATCH /domains/{domain_id}/config`` While updating Identity API-based domain config with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. 
""" self.domain_config_api.create_config(self.domain['id'], self.config) new_config = {'ldap': {'url': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}} invalid_domain_id = uuid.uuid4().hex self.patch('/domains/%(domain_id)s/config' % { 'domain_id': invalid_domain_id}, body={'config': new_config}, expected_status=exception.DomainNotFound.code) def test_update_config_group(self): """Call ``PATCH /domains/{domain_id}/config/{group}``.""" self.domain_config_api.create_config(self.domain['id'], self.config) new_config = {'ldap': {'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex}} r = self.patch('/domains/%(domain_id)s/config/ldap' % { 'domain_id': self.domain['id']}, body={'config': new_config}) res = self.domain_config_api.get_config(self.domain['id']) expected_config = copy.deepcopy(self.config) expected_config['ldap']['url'] = new_config['ldap']['url'] expected_config['ldap']['user_filter'] = ( new_config['ldap']['user_filter']) self.assertEqual(expected_config, r.result['config']) self.assertEqual(expected_config, res) def test_update_config_group_invalid_domain(self): """Call ``PATCH /domains/{domain_id}/config/{group}`` While updating Identity API-based domain config group with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. """ self.domain_config_api.create_config(self.domain['id'], self.config) new_config = {'ldap': {'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex}} invalid_domain_id = uuid.uuid4().hex self.patch('/domains/%(domain_id)s/config/ldap' % { 'domain_id': invalid_domain_id}, body={'config': new_config}, expected_status=exception.DomainNotFound.code) def test_update_config_invalid_group(self): """Call ``PATCH /domains/{domain_id}/config/{invalid_group}``.""" self.domain_config_api.create_config(self.domain['id'], self.config) # Trying to update a group that is neither whitelisted or sensitive # should result in Forbidden. 
invalid_group = uuid.uuid4().hex new_config = {invalid_group: {'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex}} self.patch('/domains/%(domain_id)s/config/%(invalid_group)s' % { 'domain_id': self.domain['id'], 'invalid_group': invalid_group}, body={'config': new_config}, expected_status=http_client.FORBIDDEN) # Trying to update a valid group, but one that is not in the current # config should result in NotFound config = {'ldap': {'suffix': uuid.uuid4().hex}} self.domain_config_api.create_config(self.domain['id'], config) new_config = {'identity': {'driver': uuid.uuid4().hex}} self.patch('/domains/%(domain_id)s/config/identity' % { 'domain_id': self.domain['id']}, body={'config': new_config}, expected_status=http_client.NOT_FOUND) def test_update_config_invalid_group_invalid_domain(self): """Call ``PATCH /domains/{domain_id}/config/{invalid_group}`` While updating Identity API-based domain config with an invalid group and an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. 
""" self.domain_config_api.create_config(self.domain['id'], self.config) invalid_group = uuid.uuid4().hex new_config = {invalid_group: {'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex}} invalid_domain_id = uuid.uuid4().hex self.patch('/domains/%(domain_id)s/config/%(invalid_group)s' % { 'domain_id': invalid_domain_id, 'invalid_group': invalid_group}, body={'config': new_config}, expected_status=exception.DomainNotFound.code) def test_update_config_option(self): """Call ``PATCH /domains/{domain_id}/config/{group}/{option}``.""" self.domain_config_api.create_config(self.domain['id'], self.config) new_config = {'url': uuid.uuid4().hex} r = self.patch('/domains/%(domain_id)s/config/ldap/url' % { 'domain_id': self.domain['id']}, body={'config': new_config}) res = self.domain_config_api.get_config(self.domain['id']) expected_config = copy.deepcopy(self.config) expected_config['ldap']['url'] = new_config['url'] self.assertEqual(expected_config, r.result['config']) self.assertEqual(expected_config, res) def test_update_config_option_invalid_domain(self): """Call ``PATCH /domains/{domain_id}/config/{group}/{option}`` While updating Identity API-based domain config option with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. """ self.domain_config_api.create_config(self.domain['id'], self.config) new_config = {'url': uuid.uuid4().hex} invalid_domain_id = uuid.uuid4().hex self.patch('/domains/%(domain_id)s/config/ldap/url' % { 'domain_id': invalid_domain_id}, body={'config': new_config}, expected_status=exception.DomainNotFound.code) def test_update_config_invalid_option(self): """Call ``PATCH /domains/{domain_id}/config/{group}/{invalid}``.""" self.domain_config_api.create_config(self.domain['id'], self.config) invalid_option = uuid.uuid4().hex new_config = {'ldap': {invalid_option: uuid.uuid4().hex}} # Trying to update an option that is neither whitelisted or sensitive # should result in Forbidden. 
self.patch( '/domains/%(domain_id)s/config/ldap/%(invalid_option)s' % { 'domain_id': self.domain['id'], 'invalid_option': invalid_option}, body={'config': new_config}, expected_status=http_client.FORBIDDEN) # Trying to update a valid option, but one that is not in the current # config should result in NotFound new_config = {'suffix': uuid.uuid4().hex} self.patch( '/domains/%(domain_id)s/config/ldap/suffix' % { 'domain_id': self.domain['id']}, body={'config': new_config}, expected_status=http_client.NOT_FOUND) def test_update_config_invalid_option_invalid_domain(self): """Call ``PATCH /domains/{domain_id}/config/{group}/{invalid}`` While updating Identity API-based domain config with an invalid option and an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. """ self.domain_config_api.create_config(self.domain['id'], self.config) invalid_option = uuid.uuid4().hex new_config = {'ldap': {invalid_option: uuid.uuid4().hex}} invalid_domain_id = uuid.uuid4().hex self.patch( '/domains/%(domain_id)s/config/ldap/%(invalid_option)s' % { 'domain_id': invalid_domain_id, 'invalid_option': invalid_option}, body={'config': new_config}, expected_status=exception.DomainNotFound.code) def test_get_config_default(self): """Call ``GET /domains/config/default``.""" # Create a config that overrides a few of the options so that we can # check that only the defaults are returned. self.domain_config_api.create_config(self.domain['id'], self.config) url = '/domains/config/default' r = self.get(url) default_config = r.result['config'] for group in default_config: for option in default_config[group]: self.assertEqual(getattr(getattr(CONF, group), option), default_config[group][option]) def test_get_config_default_by_group(self): """Call ``GET /domains/config/{group}/default``.""" # Create a config that overrides a few of the options so that we can # check that only the defaults are returned. 
self.domain_config_api.create_config(self.domain['id'], self.config) url = '/domains/config/ldap/default' r = self.get(url) default_config = r.result['config'] for option in default_config['ldap']: self.assertEqual(getattr(CONF.ldap, option), default_config['ldap'][option]) def test_get_config_default_by_option(self): """Call ``GET /domains/config/{group}/{option}/default``.""" # Create a config that overrides a few of the options so that we can # check that only the defaults are returned. self.domain_config_api.create_config(self.domain['id'], self.config) url = '/domains/config/ldap/url/default' r = self.get(url) default_config = r.result['config'] self.assertEqual(CONF.ldap.url, default_config['url']) def test_get_config_default_by_invalid_group(self): """Call ``GET for /domains/config/{bad-group}/default``.""" # First try a valid group, but one we don't support for domain config self.get('/domains/config/resouce/default', expected_status=http_client.FORBIDDEN) # Now try a totally invalid group url = '/domains/config/%s/default' % uuid.uuid4().hex self.get(url, expected_status=http_client.FORBIDDEN) def test_get_config_default_by_invalid_option(self): """Call ``GET for /domains/config/{group}/{bad-option}/default``.""" # First try a valid option, but one we don't support for domain config, # i.e. one that is in the sensitive options list self.get('/domains/config/ldap/password/default', expected_status=http_client.FORBIDDEN) # Now try a totally invalid option url = '/domains/config/ldap/%s/default' % uuid.uuid4().hex self.get(url, expected_status=http_client.FORBIDDEN) keystone-9.0.0/keystone/tests/unit/test_backend_templated.py0000664000567000056710000002374112701407102025544 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import mock from six.moves import zip from keystone import catalog from keystone.tests import unit from keystone.tests.unit.catalog import test_backends as catalog_tests from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database BROKEN_WRITE_FUNCTIONALITY_MSG = ("Templated backend doesn't correctly " "implement write operations") class TestTemplatedCatalog(unit.TestCase, catalog_tests.CatalogTests): DEFAULT_FIXTURE = { 'RegionOne': { 'compute': { 'adminURL': 'http://localhost:8774/v1.1/bar', 'publicURL': 'http://localhost:8774/v1.1/bar', 'internalURL': 'http://localhost:8774/v1.1/bar', 'name': "'Compute Service'", 'id': '2' }, 'identity': { 'adminURL': 'http://localhost:35357/v2.0', 'publicURL': 'http://localhost:5000/v2.0', 'internalURL': 'http://localhost:35357/v2.0', 'name': "'Identity Service'", 'id': '1' } } } def setUp(self): super(TestTemplatedCatalog, self).setUp() self.useFixture(database.Database()) self.load_backends() self.load_fixtures(default_fixtures) def config_overrides(self): super(TestTemplatedCatalog, self).config_overrides() self.config_fixture.config( group='catalog', driver='templated', template_file=unit.dirs.tests('default_catalog.templates')) def test_get_catalog(self): catalog_ref = self.catalog_api.get_catalog('foo', 'bar') self.assertDictEqual(self.DEFAULT_FIXTURE, catalog_ref) # NOTE(lbragstad): This test is skipped because the catalog is being # modified within the test and not through the API. 
@unit.skip_if_cache_is_enabled('catalog') def test_catalog_ignored_malformed_urls(self): # both endpoints are in the catalog catalog_ref = self.catalog_api.get_catalog('foo', 'bar') self.assertEqual(2, len(catalog_ref['RegionOne'])) region = self.catalog_api.driver.templates['RegionOne'] region['compute']['adminURL'] = 'http://localhost:8774/v1.1/$(tenant)s' # the malformed one has been removed catalog_ref = self.catalog_api.get_catalog('foo', 'bar') self.assertEqual(1, len(catalog_ref['RegionOne'])) def test_get_catalog_endpoint_disabled(self): self.skipTest("Templated backend doesn't have disabled endpoints") def test_get_v3_catalog_endpoint_disabled(self): self.skipTest("Templated backend doesn't have disabled endpoints") def assert_catalogs_equal(self, expected, observed): sort_key = lambda d: d['id'] for e, o in zip(sorted(expected, key=sort_key), sorted(observed, key=sort_key)): expected_endpoints = e.pop('endpoints') observed_endpoints = o.pop('endpoints') self.assertDictEqual(e, o) self.assertItemsEqual(expected_endpoints, observed_endpoints) def test_get_v3_catalog(self): user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex catalog_ref = self.catalog_api.get_v3_catalog(user_id, project_id) exp_catalog = [ {'endpoints': [ {'interface': 'admin', 'region': 'RegionOne', 'url': 'http://localhost:8774/v1.1/%s' % project_id}, {'interface': 'public', 'region': 'RegionOne', 'url': 'http://localhost:8774/v1.1/%s' % project_id}, {'interface': 'internal', 'region': 'RegionOne', 'url': 'http://localhost:8774/v1.1/%s' % project_id}], 'type': 'compute', 'name': "'Compute Service'", 'id': '2'}, {'endpoints': [ {'interface': 'admin', 'region': 'RegionOne', 'url': 'http://localhost:35357/v2.0'}, {'interface': 'public', 'region': 'RegionOne', 'url': 'http://localhost:5000/v2.0'}, {'interface': 'internal', 'region': 'RegionOne', 'url': 'http://localhost:35357/v2.0'}], 'type': 'identity', 'name': "'Identity Service'", 'id': '1'}] self.assert_catalogs_equal(exp_catalog, 
catalog_ref) def test_get_catalog_ignores_endpoints_with_invalid_urls(self): user_id = uuid.uuid4().hex tenant_id = None # If the URL has no 'tenant_id' to substitute, we will skip the # endpoint which contains this kind of URL. catalog_ref = self.catalog_api.get_v3_catalog(user_id, tenant_id) exp_catalog = [ {'endpoints': [], 'type': 'compute', 'name': "'Compute Service'", 'id': '2'}, {'endpoints': [ {'interface': 'admin', 'region': 'RegionOne', 'url': 'http://localhost:35357/v2.0'}, {'interface': 'public', 'region': 'RegionOne', 'url': 'http://localhost:5000/v2.0'}, {'interface': 'internal', 'region': 'RegionOne', 'url': 'http://localhost:35357/v2.0'}], 'type': 'identity', 'name': "'Identity Service'", 'id': '1'}] self.assert_catalogs_equal(exp_catalog, catalog_ref) def test_list_regions_filtered_by_parent_region_id(self): self.skipTest('Templated backend does not support hints') def test_service_filtering(self): self.skipTest("Templated backend doesn't support filtering") def test_list_services_with_hints(self): hints = {} services = self.catalog_api.list_services(hints=hints) exp_services = [ {'type': 'compute', 'description': '', 'enabled': True, 'name': "'Compute Service'", 'id': 'compute'}, {'type': 'identity', 'description': '', 'enabled': True, 'name': "'Identity Service'", 'id': 'identity'}] self.assertItemsEqual(exp_services, services) # NOTE(dstanek): the following methods have been overridden # from unit.catalog.test_backends.CatalogTests. 
def test_region_crud(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) @unit.skip_if_cache_disabled('catalog') def test_cache_layer_region_crud(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_region(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_create_region_with_duplicate_id(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_delete_region_returns_not_found(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_create_region_invalid_parent_region_returns_not_found(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_avoid_creating_circular_references_in_regions_update(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) @mock.patch.object(catalog.Driver, "_ensure_no_circle_in_hierarchical_regions") def test_circular_regions_can_be_deleted(self, mock_ensure_on_circle): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_service_crud(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) @unit.skip_if_cache_disabled('catalog') def test_cache_layer_service_crud(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_service(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_delete_service_with_endpoint(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_cache_layer_delete_service_with_endpoint(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_delete_service_returns_not_found(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_update_endpoint_nonexistent_service(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_create_endpoint_nonexistent_region(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_update_endpoint_nonexistent_region(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_get_endpoint_returns_not_found(self): self.skipTest("Templated backend doesn't use IDs for endpoints.") def 
test_delete_endpoint_returns_not_found(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_create_endpoint(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_update_endpoint(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_list_endpoints(self): expected_urls = set(['http://localhost:$(public_port)s/v2.0', 'http://localhost:$(admin_port)s/v2.0', 'http://localhost:8774/v1.1/$(tenant_id)s']) endpoints = self.catalog_api.list_endpoints() self.assertEqual(expected_urls, set(e['url'] for e in endpoints)) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_endpoint(self): self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG) keystone-9.0.0/keystone/tests/unit/test_ldap_pool_livetest.py0000664000567000056710000002014612701407102026002 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import ldappool from oslo_config import cfg from keystone.common.ldap import core as ldap_core from keystone.identity.backends import ldap from keystone.tests import unit from keystone.tests.unit import fakeldap from keystone.tests.unit import test_backend_ldap_pool from keystone.tests.unit import test_ldap_livetest CONF = cfg.CONF class LiveLDAPPoolIdentity(test_backend_ldap_pool.LdapPoolCommonTestMixin, test_ldap_livetest.LiveLDAPIdentity): """Executes existing LDAP live test with pooled LDAP handler. Also executes common pool specific tests via Mixin class. 
""" def setUp(self): super(LiveLDAPPoolIdentity, self).setUp() self.addCleanup(self.cleanup_pools) # storing to local variable to avoid long references self.conn_pools = ldap_core.PooledLDAPHandler.connection_pools def config_files(self): config_files = super(LiveLDAPPoolIdentity, self).config_files() config_files.append(unit.dirs.tests_conf('backend_pool_liveldap.conf')) return config_files def test_assert_connector_used_not_fake_ldap_pool(self): handler = ldap_core._get_connection(CONF.ldap.url, use_pool=True) self.assertNotEqual(type(handler.Connector), type(fakeldap.FakeLdapPool)) self.assertEqual(type(ldappool.StateConnector), type(handler.Connector)) def test_async_search_and_result3(self): self.config_fixture.config(group='ldap', page_size=1) self.test_user_enable_attribute_mask() def test_pool_size_expands_correctly(self): who = CONF.ldap.user cred = CONF.ldap.password # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] def _get_conn(): return ldappool_cm.connection(who, cred) with _get_conn() as c1: # 1 self.assertEqual(1, len(ldappool_cm)) self.assertTrue(c1.connected, True) self.assertTrue(c1.active, True) with _get_conn() as c2: # conn2 self.assertEqual(2, len(ldappool_cm)) self.assertTrue(c2.connected) self.assertTrue(c2.active) self.assertEqual(2, len(ldappool_cm)) # c2 went out of context, its connected but not active self.assertTrue(c2.connected) self.assertFalse(c2.active) with _get_conn() as c3: # conn3 self.assertEqual(2, len(ldappool_cm)) self.assertTrue(c3.connected) self.assertTrue(c3.active) self.assertTrue(c3 is c2) # same connection is reused self.assertTrue(c2.active) with _get_conn() as c4: # conn4 self.assertEqual(3, len(ldappool_cm)) self.assertTrue(c4.connected) self.assertTrue(c4.active) def test_password_change_with_auth_pool_disabled(self): self.config_fixture.config(group='ldap', use_auth_pool=False) old_password = self.user_sna['password'] self.test_password_change_with_pool() 
self.assertRaises(AssertionError, self.identity_api.authenticate, context={}, user_id=self.user_sna['id'], password=old_password) def _create_user_and_authenticate(self, password): # TODO(shaleh): port to new_user_ref() user_dict = { 'domain_id': CONF.identity.default_domain_id, 'name': uuid.uuid4().hex, 'password': password} user = self.identity_api.create_user(user_dict) self.identity_api.authenticate( context={}, user_id=user['id'], password=password) return self.identity_api.get_user(user['id']) def _get_auth_conn_pool_cm(self): pool_url = ldap_core.PooledLDAPHandler.auth_pool_prefix + CONF.ldap.url return self.conn_pools[pool_url] def _do_password_change_for_one_user(self, password, new_password): self.config_fixture.config(group='ldap', use_auth_pool=True) self.cleanup_pools() self.load_backends() user1 = self._create_user_and_authenticate(password) auth_cm = self._get_auth_conn_pool_cm() self.assertEqual(1, len(auth_cm)) user2 = self._create_user_and_authenticate(password) self.assertEqual(1, len(auth_cm)) user3 = self._create_user_and_authenticate(password) self.assertEqual(1, len(auth_cm)) user4 = self._create_user_and_authenticate(password) self.assertEqual(1, len(auth_cm)) user5 = self._create_user_and_authenticate(password) self.assertEqual(1, len(auth_cm)) # connection pool size remains 1 even for different user ldap bind # as there is only one active connection at a time user_api = ldap.UserApi(CONF) u1_dn = user_api._id_to_dn_string(user1['id']) u2_dn = user_api._id_to_dn_string(user2['id']) u3_dn = user_api._id_to_dn_string(user3['id']) u4_dn = user_api._id_to_dn_string(user4['id']) u5_dn = user_api._id_to_dn_string(user5['id']) # now create multiple active connections for end user auth case which # will force to keep them in pool. After that, modify one of user # password. Need to make sure that user connection is in middle # of pool list. 
auth_cm = self._get_auth_conn_pool_cm() with auth_cm.connection(u1_dn, password) as _: with auth_cm.connection(u2_dn, password) as _: with auth_cm.connection(u3_dn, password) as _: with auth_cm.connection(u4_dn, password) as _: with auth_cm.connection(u5_dn, password) as _: self.assertEqual(5, len(auth_cm)) _.unbind_s() user3['password'] = new_password self.identity_api.update_user(user3['id'], user3) return user3 def test_password_change_with_auth_pool_enabled_long_lifetime(self): self.config_fixture.config(group='ldap', auth_pool_connection_lifetime=600) old_password = 'my_password' new_password = 'new_password' user = self._do_password_change_for_one_user(old_password, new_password) user.pop('password') # with long connection lifetime auth_pool can bind to old password # successfully which is not desired if password change is frequent # use case in a deployment. # This can happen in multiple concurrent connections case only. user_ref = self.identity_api.authenticate( context={}, user_id=user['id'], password=old_password) self.assertDictEqual(user, user_ref) def test_password_change_with_auth_pool_enabled_no_lifetime(self): self.config_fixture.config(group='ldap', auth_pool_connection_lifetime=0) old_password = 'my_password' new_password = 'new_password' user = self._do_password_change_for_one_user(old_password, new_password) # now as connection lifetime is zero, so authentication # with old password will always fail. self.assertRaises(AssertionError, self.identity_api.authenticate, context={}, user_id=user['id'], password=old_password) keystone-9.0.0/keystone/tests/unit/test_v3_filters.py0000664000567000056710000004200712701407102024172 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack LLC # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_serialization import jsonutils from six.moves import range from keystone.tests import unit from keystone.tests.unit import filtering from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile from keystone.tests.unit import test_v3 CONF = cfg.CONF class IdentityTestFilteredCase(filtering.FilterTests, test_v3.RestfulTestCase): """Test filter enforcement on the v3 Identity API.""" def _policy_fixture(self): return ksfixtures.Policy(self.tmpfilename, self.config_fixture) def setUp(self): """Setup for Identity Filter Test Cases.""" self.tempfile = self.useFixture(temporaryfile.SecureTempFile()) self.tmpfilename = self.tempfile.file_name super(IdentityTestFilteredCase, self).setUp() def load_sample_data(self): """Create sample data for these tests. As well as the usual housekeeping, create a set of domains, users, roles and projects for the subsequent tests: - Three domains: A,B & C. C is disabled. - DomainA has user1, DomainB has user2 and user3 - DomainA has group1 and group2, DomainB has group3 - User1 has a role on DomainA Remember that there will also be a fourth domain in existence, the default domain. 
""" # Start by creating a few domains self._populate_default_domain() self.domainA = unit.new_domain_ref() self.resource_api.create_domain(self.domainA['id'], self.domainA) self.domainB = unit.new_domain_ref() self.resource_api.create_domain(self.domainB['id'], self.domainB) self.domainC = unit.new_domain_ref() self.domainC['enabled'] = False self.resource_api.create_domain(self.domainC['id'], self.domainC) # Now create some users, one in domainA and two of them in domainB self.user1 = unit.create_user(self.identity_api, domain_id=self.domainA['id']) self.user2 = unit.create_user(self.identity_api, domain_id=self.domainB['id']) self.user3 = unit.create_user(self.identity_api, domain_id=self.domainB['id']) self.role = unit.new_role_ref() self.role_api.create_role(self.role['id'], self.role) self.assignment_api.create_grant(self.role['id'], user_id=self.user1['id'], domain_id=self.domainA['id']) # A default auth request we can use - un-scoped user token self.auth = self.build_authentication_request( user_id=self.user1['id'], password=self.user1['password']) def _get_id_list_from_ref_list(self, ref_list): result_list = [] for x in ref_list: result_list.append(x['id']) return result_list def _set_policy(self, new_policy): with open(self.tmpfilename, "w") as policyfile: policyfile.write(jsonutils.dumps(new_policy)) def test_list_users_filtered_by_domain(self): """GET /users?domain_id=mydomain (filtered) Test Plan: - Update policy so api is unprotected - Use an un-scoped token to make sure we can filter the users by domainB, getting back the 2 users in that domain """ self._set_policy({"identity:list_users": []}) url_by_name = '/users?domain_id=%s' % self.domainB['id'] r = self.get(url_by_name, auth=self.auth) # We should get back two users, those in DomainB id_list = self._get_id_list_from_ref_list(r.result.get('users')) self.assertIn(self.user2['id'], id_list) self.assertIn(self.user3['id'], id_list) def test_list_filtered_domains(self): """GET /domains?enabled=0 Test 
Plan: - Update policy for no protection on api - Filter by the 'enabled' boolean to get disabled domains, which should return just domainC - Try the filter using different ways of specifying True/False to test that our handling of booleans in filter matching is correct """ new_policy = {"identity:list_domains": []} self._set_policy(new_policy) r = self.get('/domains?enabled=0', auth=self.auth) id_list = self._get_id_list_from_ref_list(r.result.get('domains')) self.assertEqual(1, len(id_list)) self.assertIn(self.domainC['id'], id_list) # Try a few ways of specifying 'false' for val in ('0', 'false', 'False', 'FALSE', 'n', 'no', 'off'): r = self.get('/domains?enabled=%s' % val, auth=self.auth) id_list = self._get_id_list_from_ref_list(r.result.get('domains')) self.assertEqual([self.domainC['id']], id_list) # Now try a few ways of specifying 'true' when we should get back # the other two domains, plus the default domain for val in ('1', 'true', 'True', 'TRUE', 'y', 'yes', 'on'): r = self.get('/domains?enabled=%s' % val, auth=self.auth) id_list = self._get_id_list_from_ref_list(r.result.get('domains')) self.assertEqual(3, len(id_list)) self.assertIn(self.domainA['id'], id_list) self.assertIn(self.domainB['id'], id_list) self.assertIn(CONF.identity.default_domain_id, id_list) r = self.get('/domains?enabled', auth=self.auth) id_list = self._get_id_list_from_ref_list(r.result.get('domains')) self.assertEqual(3, len(id_list)) self.assertIn(self.domainA['id'], id_list) self.assertIn(self.domainB['id'], id_list) self.assertIn(CONF.identity.default_domain_id, id_list) def test_multiple_filters(self): """GET /domains?enabled&name=myname Test Plan: - Update policy for no protection on api - Filter by the 'enabled' boolean and name - this should return a single domain """ new_policy = {"identity:list_domains": []} self._set_policy(new_policy) my_url = '/domains?enabled&name=%s' % self.domainA['name'] r = self.get(my_url, auth=self.auth) id_list = 
self._get_id_list_from_ref_list(r.result.get('domains')) self.assertEqual(1, len(id_list)) self.assertIn(self.domainA['id'], id_list) self.assertIs(True, r.result.get('domains')[0]['enabled']) def test_invalid_filter_is_ignored(self): """GET /domains?enableds&name=myname Test Plan: - Update policy for no protection on api - Filter by name and 'enableds', which does not exist - Assert 'enableds' is ignored """ new_policy = {"identity:list_domains": []} self._set_policy(new_policy) my_url = '/domains?enableds=0&name=%s' % self.domainA['name'] r = self.get(my_url, auth=self.auth) id_list = self._get_id_list_from_ref_list(r.result.get('domains')) # domainA is returned and it is enabled, since enableds=0 is not the # same as enabled=0 self.assertEqual(1, len(id_list)) self.assertIn(self.domainA['id'], id_list) self.assertIs(True, r.result.get('domains')[0]['enabled']) def test_list_users_filtered_by_funny_name(self): """GET /users?name=%myname% Test Plan: - Update policy so api is unprotected - Update a user with name that has filter escape characters - Ensure we can filter on it """ self._set_policy({"identity:list_users": []}) user = self.user1 user['name'] = '%my%name%' self.identity_api.update_user(user['id'], user) url_by_name = '/users?name=%my%name%' r = self.get(url_by_name, auth=self.auth) self.assertEqual(1, len(r.result.get('users'))) self.assertEqual(user['id'], r.result.get('users')[0]['id']) def test_inexact_filters(self): # Create 20 users user_list = self._create_test_data('user', 20) # Set up some names that we can filter on user = user_list[5] user['name'] = 'The' self.identity_api.update_user(user['id'], user) user = user_list[6] user['name'] = 'The Ministry' self.identity_api.update_user(user['id'], user) user = user_list[7] user['name'] = 'The Ministry of' self.identity_api.update_user(user['id'], user) user = user_list[8] user['name'] = 'The Ministry of Silly' self.identity_api.update_user(user['id'], user) user = user_list[9] user['name'] = 'The 
Ministry of Silly Walks' self.identity_api.update_user(user['id'], user) # ...and one for useful case insensitivity testing user = user_list[10] user['name'] = 'the ministry of silly walks OF' self.identity_api.update_user(user['id'], user) self._set_policy({"identity:list_users": []}) url_by_name = '/users?name__contains=Ministry' r = self.get(url_by_name, auth=self.auth) self.assertEqual(4, len(r.result.get('users'))) self._match_with_list(r.result.get('users'), user_list, list_start=6, list_end=10) url_by_name = '/users?name__icontains=miNIstry' r = self.get(url_by_name, auth=self.auth) self.assertEqual(5, len(r.result.get('users'))) self._match_with_list(r.result.get('users'), user_list, list_start=6, list_end=11) url_by_name = '/users?name__startswith=The' r = self.get(url_by_name, auth=self.auth) self.assertEqual(5, len(r.result.get('users'))) self._match_with_list(r.result.get('users'), user_list, list_start=5, list_end=10) url_by_name = '/users?name__istartswith=the' r = self.get(url_by_name, auth=self.auth) self.assertEqual(6, len(r.result.get('users'))) self._match_with_list(r.result.get('users'), user_list, list_start=5, list_end=11) url_by_name = '/users?name__endswith=of' r = self.get(url_by_name, auth=self.auth) self.assertEqual(1, len(r.result.get('users'))) self.assertEqual(r.result.get('users')[0]['id'], user_list[7]['id']) url_by_name = '/users?name__iendswith=OF' r = self.get(url_by_name, auth=self.auth) self.assertEqual(2, len(r.result.get('users'))) self.assertEqual(user_list[7]['id'], r.result.get('users')[0]['id']) self.assertEqual(user_list[10]['id'], r.result.get('users')[1]['id']) self._delete_test_data('user', user_list) def test_filter_sql_injection_attack(self): """GET /users?name= Test Plan: - Attempt to get all entities back by passing a two-term attribute - Attempt to piggyback filter to damage DB (e.g. 
drop table) """ self._set_policy({"identity:list_users": [], "identity:list_groups": [], "identity:create_group": []}) url_by_name = "/users?name=anything' or 'x'='x" r = self.get(url_by_name, auth=self.auth) self.assertEqual(0, len(r.result.get('users'))) # See if we can add a SQL command...use the group table instead of the # user table since 'user' is reserved word for SQLAlchemy. group = unit.new_group_ref(domain_id=self.domainB['id']) group = self.identity_api.create_group(group) url_by_name = "/users?name=x'; drop table group" r = self.get(url_by_name, auth=self.auth) # Check group table is still there... url_by_name = "/groups" r = self.get(url_by_name, auth=self.auth) self.assertTrue(len(r.result.get('groups')) > 0) class IdentityTestListLimitCase(IdentityTestFilteredCase): """Test list limiting enforcement on the v3 Identity API.""" content_type = 'json' def setUp(self): """Setup for Identity Limit Test Cases.""" super(IdentityTestListLimitCase, self).setUp() # Create 10 entries for each of the entities we are going to test self.ENTITY_TYPES = ['user', 'group', 'project'] self.entity_lists = {} for entity in self.ENTITY_TYPES: self.entity_lists[entity] = self._create_test_data(entity, 10) # Make sure we clean up when finished self.addCleanup(self.clean_up_entity, entity) self.service_list = [] self.addCleanup(self.clean_up_service) for _ in range(10): new_entity = unit.new_service_ref() service = self.catalog_api.create_service(new_entity['id'], new_entity) self.service_list.append(service) self.policy_list = [] self.addCleanup(self.clean_up_policy) for _ in range(10): new_entity = unit.new_policy_ref() policy = self.policy_api.create_policy(new_entity['id'], new_entity) self.policy_list.append(policy) def clean_up_entity(self, entity): """Clean up entity test data from Identity Limit Test Cases.""" self._delete_test_data(entity, self.entity_lists[entity]) def clean_up_service(self): """Clean up service test data from Identity Limit Test Cases.""" for 
service in self.service_list: self.catalog_api.delete_service(service['id']) def clean_up_policy(self): """Clean up policy test data from Identity Limit Test Cases.""" for policy in self.policy_list: self.policy_api.delete_policy(policy['id']) def _test_entity_list_limit(self, entity, driver): """GET / (limited) Test Plan: - For the specified type of entity: - Update policy for no protection on api - Add a bunch of entities - Set the global list limit to 5, and check that getting all - entities only returns 5 - Set the driver list_limit to 4, and check that now only 4 are - returned """ if entity == 'policy': plural = 'policies' else: plural = '%ss' % entity self._set_policy({"identity:list_%s" % plural: []}) self.config_fixture.config(list_limit=5) self.config_fixture.config(group=driver, list_limit=None) r = self.get('/%s' % plural, auth=self.auth) self.assertEqual(5, len(r.result.get(plural))) self.assertIs(r.result.get('truncated'), True) self.config_fixture.config(group=driver, list_limit=4) r = self.get('/%s' % plural, auth=self.auth) self.assertEqual(4, len(r.result.get(plural))) self.assertIs(r.result.get('truncated'), True) def test_users_list_limit(self): self._test_entity_list_limit('user', 'identity') def test_groups_list_limit(self): self._test_entity_list_limit('group', 'identity') def test_projects_list_limit(self): self._test_entity_list_limit('project', 'resource') def test_services_list_limit(self): self._test_entity_list_limit('service', 'catalog') def test_non_driver_list_limit(self): """Check list can be limited without driver level support. Policy limiting is not done at the driver level (since it really isn't worth doing it there). So use this as a test for ensuring the controller level will successfully limit in this case. 
""" self._test_entity_list_limit('policy', 'policy') def test_no_limit(self): """Check truncated attribute not set when list not limited.""" self._set_policy({"identity:list_services": []}) r = self.get('/services', auth=self.auth) self.assertEqual(10, len(r.result.get('services'))) self.assertIsNone(r.result.get('truncated')) def test_at_limit(self): """Check truncated attribute not set when list at max size.""" # Test this by overriding the general limit with a higher # driver-specific limit (allowing all entities to be returned # in the collection), which should result in a non truncated list self._set_policy({"identity:list_services": []}) self.config_fixture.config(list_limit=5) self.config_fixture.config(group='catalog', list_limit=10) r = self.get('/services', auth=self.auth) self.assertEqual(10, len(r.result.get('services'))) self.assertIsNone(r.result.get('truncated')) keystone-9.0.0/keystone/tests/unit/catalog/0000775000567000056710000000000012701407246022121 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/catalog/__init__.py0000664000567000056710000000000012701407102024207 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/catalog/test_core.py0000664000567000056710000001076012701407102024455 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from keystone.catalog import core from keystone import exception from keystone.tests import unit class FormatUrlTests(unit.BaseTestCase): def test_successful_formatting(self): url_template = ('http://$(public_bind_host)s:$(admin_port)d/' '$(tenant_id)s/$(user_id)s/$(project_id)s') project_id = uuid.uuid4().hex values = {'public_bind_host': 'server', 'admin_port': 9090, 'tenant_id': 'A', 'user_id': 'B', 'project_id': project_id} actual_url = core.format_url(url_template, values) expected_url = 'http://server:9090/A/B/%s' % (project_id,) self.assertEqual(expected_url, actual_url) def test_raises_malformed_on_missing_key(self): self.assertRaises(exception.MalformedEndpoint, core.format_url, "http://$(public_bind_host)s/$(public_port)d", {"public_bind_host": "1"}) def test_raises_malformed_on_wrong_type(self): self.assertRaises(exception.MalformedEndpoint, core.format_url, "http://$(public_bind_host)d", {"public_bind_host": "something"}) def test_raises_malformed_on_incomplete_format(self): self.assertRaises(exception.MalformedEndpoint, core.format_url, "http://$(public_bind_host)", {"public_bind_host": "1"}) def test_formatting_a_non_string(self): def _test(url_template): self.assertRaises(exception.MalformedEndpoint, core.format_url, url_template, {}) _test(None) _test(object()) def test_substitution_with_key_not_allowed(self): # If the url template contains a substitution that's not in the allowed # list then MalformedEndpoint is raised. # For example, admin_token isn't allowed. url_template = ('http://$(public_bind_host)s:$(public_port)d/' '$(tenant_id)s/$(user_id)s/$(admin_token)s') values = {'public_bind_host': 'server', 'public_port': 9090, 'tenant_id': 'A', 'user_id': 'B', 'admin_token': 'C'} self.assertRaises(exception.MalformedEndpoint, core.format_url, url_template, values) def test_substitution_with_allowed_tenant_keyerror(self): # No value of 'tenant_id' is passed into url_template. 
# mod: format_url will return None instead of raising # "MalformedEndpoint" exception. # This is intentional behavior since we don't want to skip # all the later endpoints once there is an URL of endpoint # trying to replace 'tenant_id' with None. url_template = ('http://$(public_bind_host)s:$(admin_port)d/' '$(tenant_id)s/$(user_id)s') values = {'public_bind_host': 'server', 'admin_port': 9090, 'user_id': 'B'} self.assertIsNone(core.format_url(url_template, values, silent_keyerror_failures=['tenant_id'])) def test_substitution_with_allowed_project_keyerror(self): # No value of 'project_id' is passed into url_template. # mod: format_url will return None instead of raising # "MalformedEndpoint" exception. # This is intentional behavior since we don't want to skip # all the later endpoints once there is an URL of endpoint # trying to replace 'project_id' with None. url_template = ('http://$(public_bind_host)s:$(admin_port)d/' '$(project_id)s/$(user_id)s') values = {'public_bind_host': 'server', 'admin_port': 9090, 'user_id': 'B'} self.assertIsNone(core.format_url(url_template, values, silent_keyerror_failures=['project_id'])) keystone-9.0.0/keystone/tests/unit/catalog/test_backends.py0000664000567000056710000006303412701407102025301 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import uuid import mock from six.moves import range from testtools import matchers from keystone.catalog import core from keystone.common import driver_hints from keystone import exception from keystone.tests import unit class CatalogTests(object): _legacy_endpoint_id_in_endpoint = True _enabled_default_to_true_when_creating_endpoint = False def test_region_crud(self): # create region_id = '0' * 255 new_region = unit.new_region_ref(id=region_id) res = self.catalog_api.create_region(new_region) # Ensure that we don't need to have a # parent_region_id in the original supplied # ref dict, but that it will be returned from # the endpoint, with None value. expected_region = new_region.copy() expected_region['parent_region_id'] = None self.assertDictEqual(expected_region, res) # Test adding another region with the one above # as its parent. We will check below whether deleting # the parent successfully deletes any child regions. parent_region_id = region_id new_region = unit.new_region_ref(parent_region_id=parent_region_id) region_id = new_region['id'] res = self.catalog_api.create_region(new_region) self.assertDictEqual(new_region, res) # list regions = self.catalog_api.list_regions() self.assertThat(regions, matchers.HasLength(2)) region_ids = [x['id'] for x in regions] self.assertIn(parent_region_id, region_ids) self.assertIn(region_id, region_ids) # update region_desc_update = {'description': uuid.uuid4().hex} res = self.catalog_api.update_region(region_id, region_desc_update) expected_region = new_region.copy() expected_region['description'] = region_desc_update['description'] self.assertDictEqual(expected_region, res) # delete self.catalog_api.delete_region(parent_region_id) self.assertRaises(exception.RegionNotFound, self.catalog_api.delete_region, parent_region_id) self.assertRaises(exception.RegionNotFound, self.catalog_api.get_region, parent_region_id) # Ensure the child is also gone... 
self.assertRaises(exception.RegionNotFound, self.catalog_api.get_region, region_id) def _create_region_with_parent_id(self, parent_id=None): new_region = unit.new_region_ref(parent_region_id=parent_id) self.catalog_api.create_region(new_region) return new_region def test_list_regions_filtered_by_parent_region_id(self): new_region = self._create_region_with_parent_id() parent_id = new_region['id'] new_region = self._create_region_with_parent_id(parent_id) new_region = self._create_region_with_parent_id(parent_id) # filter by parent_region_id hints = driver_hints.Hints() hints.add_filter('parent_region_id', parent_id) regions = self.catalog_api.list_regions(hints) for region in regions: self.assertEqual(parent_id, region['parent_region_id']) @unit.skip_if_cache_disabled('catalog') def test_cache_layer_region_crud(self): new_region = unit.new_region_ref() region_id = new_region['id'] self.catalog_api.create_region(new_region.copy()) updated_region = copy.deepcopy(new_region) updated_region['description'] = uuid.uuid4().hex # cache the result self.catalog_api.get_region(region_id) # update the region bypassing catalog_api self.catalog_api.driver.update_region(region_id, updated_region) self.assertDictContainsSubset(new_region, self.catalog_api.get_region(region_id)) self.catalog_api.get_region.invalidate(self.catalog_api, region_id) self.assertDictContainsSubset(updated_region, self.catalog_api.get_region(region_id)) # delete the region self.catalog_api.driver.delete_region(region_id) # still get the old region self.assertDictContainsSubset(updated_region, self.catalog_api.get_region(region_id)) self.catalog_api.get_region.invalidate(self.catalog_api, region_id) self.assertRaises(exception.RegionNotFound, self.catalog_api.get_region, region_id) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_region(self): new_region = unit.new_region_ref() region_id = new_region['id'] self.catalog_api.create_region(new_region) # cache the region 
self.catalog_api.get_region(region_id) # update the region via catalog_api new_description = {'description': uuid.uuid4().hex} self.catalog_api.update_region(region_id, new_description) # assert that we can get the new region current_region = self.catalog_api.get_region(region_id) self.assertEqual(new_description['description'], current_region['description']) def test_create_region_with_duplicate_id(self): new_region = unit.new_region_ref() self.catalog_api.create_region(new_region) # Create region again with duplicate id self.assertRaises(exception.Conflict, self.catalog_api.create_region, new_region) def test_get_region_returns_not_found(self): self.assertRaises(exception.RegionNotFound, self.catalog_api.get_region, uuid.uuid4().hex) def test_delete_region_returns_not_found(self): self.assertRaises(exception.RegionNotFound, self.catalog_api.delete_region, uuid.uuid4().hex) def test_create_region_invalid_parent_region_returns_not_found(self): new_region = unit.new_region_ref(parent_region_id='nonexisting') self.assertRaises(exception.RegionNotFound, self.catalog_api.create_region, new_region) def test_avoid_creating_circular_references_in_regions_update(self): region_one = self._create_region_with_parent_id() # self circle: region_one->region_one self.assertRaises(exception.CircularRegionHierarchyError, self.catalog_api.update_region, region_one['id'], {'parent_region_id': region_one['id']}) # region_one->region_two->region_one region_two = self._create_region_with_parent_id(region_one['id']) self.assertRaises(exception.CircularRegionHierarchyError, self.catalog_api.update_region, region_one['id'], {'parent_region_id': region_two['id']}) # region_one region_two->region_three->region_four->region_two region_three = self._create_region_with_parent_id(region_two['id']) region_four = self._create_region_with_parent_id(region_three['id']) self.assertRaises(exception.CircularRegionHierarchyError, self.catalog_api.update_region, region_two['id'], {'parent_region_id': 
region_four['id']}) @mock.patch.object(core.CatalogDriverV8, "_ensure_no_circle_in_hierarchical_regions") def test_circular_regions_can_be_deleted(self, mock_ensure_on_circle): # turn off the enforcement so that cycles can be created for the test mock_ensure_on_circle.return_value = None region_one = self._create_region_with_parent_id() # self circle: region_one->region_one self.catalog_api.update_region( region_one['id'], {'parent_region_id': region_one['id']}) self.catalog_api.delete_region(region_one['id']) self.assertRaises(exception.RegionNotFound, self.catalog_api.get_region, region_one['id']) # region_one->region_two->region_one region_one = self._create_region_with_parent_id() region_two = self._create_region_with_parent_id(region_one['id']) self.catalog_api.update_region( region_one['id'], {'parent_region_id': region_two['id']}) self.catalog_api.delete_region(region_one['id']) self.assertRaises(exception.RegionNotFound, self.catalog_api.get_region, region_one['id']) self.assertRaises(exception.RegionNotFound, self.catalog_api.get_region, region_two['id']) # region_one->region_two->region_three->region_one region_one = self._create_region_with_parent_id() region_two = self._create_region_with_parent_id(region_one['id']) region_three = self._create_region_with_parent_id(region_two['id']) self.catalog_api.update_region( region_one['id'], {'parent_region_id': region_three['id']}) self.catalog_api.delete_region(region_two['id']) self.assertRaises(exception.RegionNotFound, self.catalog_api.get_region, region_two['id']) self.assertRaises(exception.RegionNotFound, self.catalog_api.get_region, region_one['id']) self.assertRaises(exception.RegionNotFound, self.catalog_api.get_region, region_three['id']) def test_service_crud(self): # create new_service = unit.new_service_ref() service_id = new_service['id'] res = self.catalog_api.create_service(service_id, new_service) self.assertDictEqual(new_service, res) # list services = self.catalog_api.list_services() 
self.assertIn(service_id, [x['id'] for x in services]) # update service_name_update = {'name': uuid.uuid4().hex} res = self.catalog_api.update_service(service_id, service_name_update) expected_service = new_service.copy() expected_service['name'] = service_name_update['name'] self.assertDictEqual(expected_service, res) # delete self.catalog_api.delete_service(service_id) self.assertRaises(exception.ServiceNotFound, self.catalog_api.delete_service, service_id) self.assertRaises(exception.ServiceNotFound, self.catalog_api.get_service, service_id) def _create_random_service(self): new_service = unit.new_service_ref() service_id = new_service['id'] return self.catalog_api.create_service(service_id, new_service) def test_service_filtering(self): target_service = self._create_random_service() unrelated_service1 = self._create_random_service() unrelated_service2 = self._create_random_service() # filter by type hint_for_type = driver_hints.Hints() hint_for_type.add_filter(name="type", value=target_service['type']) services = self.catalog_api.list_services(hint_for_type) self.assertEqual(1, len(services)) filtered_service = services[0] self.assertEqual(target_service['type'], filtered_service['type']) self.assertEqual(target_service['id'], filtered_service['id']) # filter should have been removed, since it was already used by the # backend self.assertEqual(0, len(hint_for_type.filters)) # the backend shouldn't filter by name, since this is handled by the # front end hint_for_name = driver_hints.Hints() hint_for_name.add_filter(name="name", value=target_service['name']) services = self.catalog_api.list_services(hint_for_name) self.assertEqual(3, len(services)) # filter should still be there, since it wasn't used by the backend self.assertEqual(1, len(hint_for_name.filters)) self.catalog_api.delete_service(target_service['id']) self.catalog_api.delete_service(unrelated_service1['id']) self.catalog_api.delete_service(unrelated_service2['id']) 
@unit.skip_if_cache_disabled('catalog') def test_cache_layer_service_crud(self): new_service = unit.new_service_ref() service_id = new_service['id'] res = self.catalog_api.create_service(service_id, new_service) self.assertDictEqual(new_service, res) self.catalog_api.get_service(service_id) updated_service = copy.deepcopy(new_service) updated_service['description'] = uuid.uuid4().hex # update bypassing catalog api self.catalog_api.driver.update_service(service_id, updated_service) self.assertDictContainsSubset(new_service, self.catalog_api.get_service(service_id)) self.catalog_api.get_service.invalidate(self.catalog_api, service_id) self.assertDictContainsSubset(updated_service, self.catalog_api.get_service(service_id)) # delete bypassing catalog api self.catalog_api.driver.delete_service(service_id) self.assertDictContainsSubset(updated_service, self.catalog_api.get_service(service_id)) self.catalog_api.get_service.invalidate(self.catalog_api, service_id) self.assertRaises(exception.ServiceNotFound, self.catalog_api.delete_service, service_id) self.assertRaises(exception.ServiceNotFound, self.catalog_api.get_service, service_id) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_service(self): new_service = unit.new_service_ref() service_id = new_service['id'] self.catalog_api.create_service(service_id, new_service) # cache the service self.catalog_api.get_service(service_id) # update the service via catalog api new_type = {'type': uuid.uuid4().hex} self.catalog_api.update_service(service_id, new_type) # assert that we can get the new service current_service = self.catalog_api.get_service(service_id) self.assertEqual(new_type['type'], current_service['type']) def test_delete_service_with_endpoint(self): # create a service service = unit.new_service_ref() self.catalog_api.create_service(service['id'], service) # create an endpoint attached to the service endpoint = unit.new_endpoint_ref(service_id=service['id'], region_id=None) 
self.catalog_api.create_endpoint(endpoint['id'], endpoint) # deleting the service should also delete the endpoint self.catalog_api.delete_service(service['id']) self.assertRaises(exception.EndpointNotFound, self.catalog_api.get_endpoint, endpoint['id']) self.assertRaises(exception.EndpointNotFound, self.catalog_api.delete_endpoint, endpoint['id']) def test_cache_layer_delete_service_with_endpoint(self): service = unit.new_service_ref() self.catalog_api.create_service(service['id'], service) # create an endpoint attached to the service endpoint = unit.new_endpoint_ref(service_id=service['id'], region_id=None) self.catalog_api.create_endpoint(endpoint['id'], endpoint) # cache the result self.catalog_api.get_service(service['id']) self.catalog_api.get_endpoint(endpoint['id']) # delete the service bypassing catalog api self.catalog_api.driver.delete_service(service['id']) self.assertDictContainsSubset(endpoint, self.catalog_api. get_endpoint(endpoint['id'])) self.assertDictContainsSubset(service, self.catalog_api. 
get_service(service['id'])) self.catalog_api.get_endpoint.invalidate(self.catalog_api, endpoint['id']) self.assertRaises(exception.EndpointNotFound, self.catalog_api.get_endpoint, endpoint['id']) self.assertRaises(exception.EndpointNotFound, self.catalog_api.delete_endpoint, endpoint['id']) # multiple endpoints associated with a service second_endpoint = unit.new_endpoint_ref(service_id=service['id'], region_id=None) self.catalog_api.create_service(service['id'], service) self.catalog_api.create_endpoint(endpoint['id'], endpoint) self.catalog_api.create_endpoint(second_endpoint['id'], second_endpoint) self.catalog_api.delete_service(service['id']) self.assertRaises(exception.EndpointNotFound, self.catalog_api.get_endpoint, endpoint['id']) self.assertRaises(exception.EndpointNotFound, self.catalog_api.delete_endpoint, endpoint['id']) self.assertRaises(exception.EndpointNotFound, self.catalog_api.get_endpoint, second_endpoint['id']) self.assertRaises(exception.EndpointNotFound, self.catalog_api.delete_endpoint, second_endpoint['id']) def test_get_service_returns_not_found(self): self.assertRaises(exception.ServiceNotFound, self.catalog_api.get_service, uuid.uuid4().hex) def test_delete_service_returns_not_found(self): self.assertRaises(exception.ServiceNotFound, self.catalog_api.delete_service, uuid.uuid4().hex) def test_create_endpoint_nonexistent_service(self): endpoint = unit.new_endpoint_ref(service_id=uuid.uuid4().hex, region_id=None) self.assertRaises(exception.ValidationError, self.catalog_api.create_endpoint, endpoint['id'], endpoint) def test_update_endpoint_nonexistent_service(self): dummy_service, enabled_endpoint, dummy_disabled_endpoint = ( self._create_endpoints()) new_endpoint = unit.new_endpoint_ref(service_id=uuid.uuid4().hex) self.assertRaises(exception.ValidationError, self.catalog_api.update_endpoint, enabled_endpoint['id'], new_endpoint) def test_create_endpoint_nonexistent_region(self): service = unit.new_service_ref() 
self.catalog_api.create_service(service['id'], service) endpoint = unit.new_endpoint_ref(service_id=service['id']) self.assertRaises(exception.ValidationError, self.catalog_api.create_endpoint, endpoint['id'], endpoint) def test_update_endpoint_nonexistent_region(self): dummy_service, enabled_endpoint, dummy_disabled_endpoint = ( self._create_endpoints()) new_endpoint = unit.new_endpoint_ref(service_id=uuid.uuid4().hex) self.assertRaises(exception.ValidationError, self.catalog_api.update_endpoint, enabled_endpoint['id'], new_endpoint) def test_get_endpoint_returns_not_found(self): self.assertRaises(exception.EndpointNotFound, self.catalog_api.get_endpoint, uuid.uuid4().hex) def test_delete_endpoint_returns_not_found(self): self.assertRaises(exception.EndpointNotFound, self.catalog_api.delete_endpoint, uuid.uuid4().hex) def test_create_endpoint(self): service = unit.new_service_ref() self.catalog_api.create_service(service['id'], service) endpoint = unit.new_endpoint_ref(service_id=service['id'], region_id=None) self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy()) def test_update_endpoint(self): dummy_service_ref, endpoint_ref, dummy_disabled_endpoint_ref = ( self._create_endpoints()) res = self.catalog_api.update_endpoint(endpoint_ref['id'], {'interface': 'private'}) expected_endpoint = endpoint_ref.copy() expected_endpoint['enabled'] = True expected_endpoint['interface'] = 'private' if self._legacy_endpoint_id_in_endpoint: expected_endpoint['legacy_endpoint_id'] = None if self._enabled_default_to_true_when_creating_endpoint: expected_endpoint['enabled'] = True self.assertDictEqual(expected_endpoint, res) def _create_endpoints(self): # Creates a service and 2 endpoints for the service in the same region. # The 'public' interface is enabled and the 'internal' interface is # disabled. 
def create_endpoint(service_id, region, **kwargs): ref = unit.new_endpoint_ref( service_id=service_id, region_id=region, url='http://localhost/%s' % uuid.uuid4().hex, **kwargs) self.catalog_api.create_endpoint(ref['id'], ref) return ref # Create a service for use with the endpoints. service_ref = unit.new_service_ref() service_id = service_ref['id'] self.catalog_api.create_service(service_id, service_ref) region = unit.new_region_ref() self.catalog_api.create_region(region) # Create endpoints enabled_endpoint_ref = create_endpoint(service_id, region['id']) disabled_endpoint_ref = create_endpoint( service_id, region['id'], enabled=False, interface='internal') return service_ref, enabled_endpoint_ref, disabled_endpoint_ref def test_list_endpoints(self): service = unit.new_service_ref() self.catalog_api.create_service(service['id'], service) expected_ids = set([uuid.uuid4().hex for _ in range(3)]) for endpoint_id in expected_ids: endpoint = unit.new_endpoint_ref(service_id=service['id'], id=endpoint_id, region_id=None) self.catalog_api.create_endpoint(endpoint['id'], endpoint) endpoints = self.catalog_api.list_endpoints() self.assertEqual(expected_ids, set(e['id'] for e in endpoints)) def test_get_catalog_endpoint_disabled(self): """Get back only enabled endpoints when get the v2 catalog.""" service_ref, enabled_endpoint_ref, dummy_disabled_endpoint_ref = ( self._create_endpoints()) user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex catalog = self.catalog_api.get_catalog(user_id, project_id) exp_entry = { 'id': enabled_endpoint_ref['id'], 'name': service_ref['name'], 'publicURL': enabled_endpoint_ref['url'], } region = enabled_endpoint_ref['region_id'] self.assertEqual(exp_entry, catalog[region][service_ref['type']]) def test_get_v3_catalog_endpoint_disabled(self): """Get back only enabled endpoints when get the v3 catalog.""" enabled_endpoint_ref = self._create_endpoints()[1] user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex catalog = 
self.catalog_api.get_v3_catalog(user_id, project_id) endpoint_ids = [x['id'] for x in catalog[0]['endpoints']] self.assertEqual([enabled_endpoint_ref['id']], endpoint_ids) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_endpoint(self): service = unit.new_service_ref() self.catalog_api.create_service(service['id'], service) # create an endpoint attached to the service endpoint = unit.new_endpoint_ref(service_id=service['id'], region_id=None) self.catalog_api.create_endpoint(endpoint['id'], endpoint) # cache the endpoint self.catalog_api.get_endpoint(endpoint['id']) # update the endpoint via catalog api new_url = {'url': uuid.uuid4().hex} self.catalog_api.update_endpoint(endpoint['id'], new_url) # assert that we can get the new endpoint current_endpoint = self.catalog_api.get_endpoint(endpoint['id']) self.assertEqual(new_url['url'], current_endpoint['url']) keystone-9.0.0/keystone/tests/unit/test_auth.py0000664000567000056710000017022412701407102023056 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import datetime import random import string import uuid import mock from oslo_config import cfg import oslo_utils.fixture from oslo_utils import timeutils import six from testtools import matchers from keystone import assignment from keystone import auth from keystone.common import authorization from keystone.common import config from keystone import exception from keystone.models import token_model from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database from keystone import token from keystone.token import provider from keystone import trust CONF = cfg.CONF TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' HOST = ''.join(random.choice(string.ascii_lowercase) for x in range( random.randint(5, 15))) HOST_URL = 'http://%s' % (HOST) def _build_user_auth(token=None, user_id=None, username=None, password=None, tenant_id=None, tenant_name=None, trust_id=None): """Build auth dictionary. It will create an auth dictionary based on all the arguments that it receives. 
""" auth_json = {} if token is not None: auth_json['token'] = token if username or password: auth_json['passwordCredentials'] = {} if username is not None: auth_json['passwordCredentials']['username'] = username if user_id is not None: auth_json['passwordCredentials']['userId'] = user_id if password is not None: auth_json['passwordCredentials']['password'] = password if tenant_name is not None: auth_json['tenantName'] = tenant_name if tenant_id is not None: auth_json['tenantId'] = tenant_id if trust_id is not None: auth_json['trust_id'] = trust_id return auth_json class AuthTest(unit.TestCase): def setUp(self): self.useFixture(database.Database()) super(AuthTest, self).setUp() self.time_fixture = self.useFixture(oslo_utils.fixture.TimeFixture()) self.load_backends() self.load_fixtures(default_fixtures) self.context_with_remote_user = {'environment': {'REMOTE_USER': 'FOO', 'AUTH_TYPE': 'Negotiate'}} self.empty_context = {'environment': {}} self.controller = token.controllers.Auth() def assertEqualTokens(self, a, b, enforce_audit_ids=True): """Assert that two tokens are equal. Compare two tokens except for their ids. This also truncates the time in the comparison. 
""" def normalize(token): token['access']['token']['id'] = 'dummy' del token['access']['token']['expires'] del token['access']['token']['issued_at'] del token['access']['token']['audit_ids'] return token self.assertCloseEnoughForGovernmentWork( timeutils.parse_isotime(a['access']['token']['expires']), timeutils.parse_isotime(b['access']['token']['expires'])) self.assertCloseEnoughForGovernmentWork( timeutils.parse_isotime(a['access']['token']['issued_at']), timeutils.parse_isotime(b['access']['token']['issued_at'])) if enforce_audit_ids: self.assertIn(a['access']['token']['audit_ids'][0], b['access']['token']['audit_ids']) self.assertThat(len(a['access']['token']['audit_ids']), matchers.LessThan(3)) self.assertThat(len(b['access']['token']['audit_ids']), matchers.LessThan(3)) return self.assertDictEqual(normalize(a), normalize(b)) class AuthBadRequests(AuthTest): def test_no_external_auth(self): """Verify that _authenticate_external() raises exception if N/A.""" self.assertRaises( token.controllers.ExternalAuthNotApplicable, self.controller._authenticate_external, context={}, auth={}) def test_empty_remote_user(self): """Verify exception is raised when REMOTE_USER is an empty string.""" context = {'environment': {'REMOTE_USER': ''}} self.assertRaises( token.controllers.ExternalAuthNotApplicable, self.controller._authenticate_external, context=context, auth={}) def test_no_token_in_auth(self): """Verify that _authenticate_token() raises exception if no token.""" self.assertRaises( exception.ValidationError, self.controller._authenticate_token, None, {}) def test_no_credentials_in_auth(self): """Verify that _authenticate_local() raises exception if no creds.""" self.assertRaises( exception.ValidationError, self.controller._authenticate_local, None, {}) def test_empty_username_and_userid_in_auth(self): """Verify that empty username and userID raises ValidationError.""" self.assertRaises( exception.ValidationError, self.controller._authenticate_local, None, 
{'passwordCredentials': {'password': 'abc', 'userId': '', 'username': ''}}) def test_authenticate_blank_request_body(self): """Verify sending empty json dict raises the right exception.""" self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, {}) def test_authenticate_blank_auth(self): """Verify sending blank 'auth' raises the right exception.""" body_dict = _build_user_auth() self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_authenticate_invalid_auth_content(self): """Verify sending invalid 'auth' raises the right exception.""" self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, {'auth': 'abcd'}) def test_authenticate_user_id_too_large(self): """Verify sending large 'userId' raises the right exception.""" body_dict = _build_user_auth(user_id='0' * 65, username='FOO', password='foo2') self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_username_too_large(self): """Verify sending large 'username' raises the right exception.""" body_dict = _build_user_auth(username='0' * 65, password='foo2') self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_tenant_id_too_large(self): """Verify sending large 'tenantId' raises the right exception.""" body_dict = _build_user_auth(username='FOO', password='foo2', tenant_id='0' * 65) self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_tenant_name_too_large(self): """Verify sending large 'tenantName' raises the right exception.""" body_dict = _build_user_auth(username='FOO', password='foo2', tenant_name='0' * 65) self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_token_too_large(self): """Verify sending large 'token' raises the right exception.""" body_dict = 
_build_user_auth(token={'id': '0' * 8193}) self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_password_too_large(self): """Verify sending large 'password' raises the right exception.""" length = CONF.identity.max_password_length + 1 body_dict = _build_user_auth(username='FOO', password='0' * length) self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_fails_if_project_unsafe(self): """Verify authenticate to a project with unsafe name fails.""" # Start with url name restrictions off, so we can create the unsafe # named project self.config_fixture.config(group='resource', project_name_url_safe='off') unsafe_name = 'i am not / safe' project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, name=unsafe_name) self.resource_api.create_project(project['id'], project) self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], project['id'], self.role_member['id']) no_context = {} body_dict = _build_user_auth( username=self.user_foo['name'], password=self.user_foo['password'], tenant_name=project['name']) # Since name url restriction is off, we should be able to autenticate self.controller.authenticate(no_context, body_dict) # Set the name url restriction to strict and we should fail to # authenticate self.config_fixture.config(group='resource', project_name_url_safe='strict') self.assertRaises(exception.Unauthorized, self.controller.authenticate, no_context, body_dict) class AuthWithToken(AuthTest): def test_unscoped_token(self): """Verify getting an unscoped token with password creds.""" body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate({}, body_dict) self.assertNotIn('tenant', unscoped_token['access']['token']) def test_auth_invalid_token(self): """Verify exception is raised if invalid token.""" body_dict = _build_user_auth(token={"id": uuid.uuid4().hex}) 
self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_bad_formatted_token(self): """Verify exception is raised if invalid token.""" body_dict = _build_user_auth(token={}) self.assertRaises( exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_auth_unscoped_token_no_project(self): """Verify getting an unscoped token with an unscoped token.""" body_dict = _build_user_auth( username='FOO', password='foo2') unscoped_token = self.controller.authenticate({}, body_dict) body_dict = _build_user_auth( token=unscoped_token["access"]["token"]) unscoped_token_2 = self.controller.authenticate({}, body_dict) self.assertEqualTokens(unscoped_token, unscoped_token_2) def test_auth_unscoped_token_project(self): """Verify getting a token in a tenant with an unscoped token.""" # Add a role in so we can check we get this back self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_bar['id'], self.role_member['id']) # Get an unscoped token body_dict = _build_user_auth( username='FOO', password='foo2') unscoped_token = self.controller.authenticate({}, body_dict) # Get a token on BAR tenant using the unscoped token body_dict = _build_user_auth( token=unscoped_token["access"]["token"], tenant_name="BAR") scoped_token = self.controller.authenticate({}, body_dict) tenant = scoped_token["access"]["token"]["tenant"] roles = scoped_token["access"]["metadata"]["roles"] self.assertEqual(self.tenant_bar['id'], tenant["id"]) self.assertThat(roles, matchers.Contains(self.role_member['id'])) def test_auth_scoped_token_bad_project_with_debug(self): """Authenticating with an invalid project fails.""" # Bug 1379952 reports poor user feedback, even in insecure_debug mode, # when the user accidentally passes a project name as an ID. # This test intentionally does exactly that. 
body_dict = _build_user_auth( username=self.user_foo['name'], password=self.user_foo['password'], tenant_id=self.tenant_bar['name']) # with insecure_debug enabled, this produces a friendly exception. self.config_fixture.config(debug=True, insecure_debug=True) e = self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) # explicitly verify that the error message shows that a *name* is # found where an *ID* is expected self.assertIn( 'Project ID not found: %s' % self.tenant_bar['name'], six.text_type(e)) def test_auth_scoped_token_bad_project_without_debug(self): """Authenticating with an invalid project fails.""" # Bug 1379952 reports poor user feedback, even in insecure_debug mode, # when the user accidentally passes a project name as an ID. # This test intentionally does exactly that. body_dict = _build_user_auth( username=self.user_foo['name'], password=self.user_foo['password'], tenant_id=self.tenant_bar['name']) # with insecure_debug disabled (the default), authentication failure # details are suppressed. e = self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) # explicitly verify that the error message details above have been # suppressed. 
self.assertNotIn( 'Project ID not found: %s' % self.tenant_bar['name'], six.text_type(e)) def test_auth_token_project_group_role(self): """Verify getting a token in a tenant with group roles.""" # Add a v2 style role in so we can check we get this back self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_bar['id'], self.role_member['id']) # Now create a group role for this user as well domain1 = unit.new_domain_ref() self.resource_api.create_domain(domain1['id'], domain1) new_group = unit.new_group_ref(domain_id=domain1['id']) new_group = self.identity_api.create_group(new_group) self.identity_api.add_user_to_group(self.user_foo['id'], new_group['id']) self.assignment_api.create_grant( group_id=new_group['id'], project_id=self.tenant_bar['id'], role_id=self.role_admin['id']) # Get a scoped token for the tenant body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") scoped_token = self.controller.authenticate({}, body_dict) tenant = scoped_token["access"]["token"]["tenant"] roles = scoped_token["access"]["metadata"]["roles"] self.assertEqual(self.tenant_bar['id'], tenant["id"]) self.assertIn(self.role_member['id'], roles) self.assertIn(self.role_admin['id'], roles) def test_belongs_to_no_tenant(self): r = self.controller.authenticate( {}, auth={ 'passwordCredentials': { 'username': self.user_foo['name'], 'password': self.user_foo['password'] } }) unscoped_token_id = r['access']['token']['id'] self.assertRaises( exception.Unauthorized, self.controller.validate_token, dict(is_admin=True, query_string={'belongsTo': 'BAR'}), token_id=unscoped_token_id) def test_belongs_to(self): body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") scoped_token = self.controller.authenticate({}, body_dict) scoped_token_id = scoped_token['access']['token']['id'] self.assertRaises( exception.Unauthorized, self.controller.validate_token, dict(is_admin=True, query_string={'belongsTo': 'me'}), 
token_id=scoped_token_id) self.assertRaises( exception.Unauthorized, self.controller.validate_token, dict(is_admin=True, query_string={'belongsTo': 'BAR'}), token_id=scoped_token_id) def test_token_auth_with_binding(self): self.config_fixture.config(group='token', bind=['kerberos']) body_dict = _build_user_auth() unscoped_token = self.controller.authenticate( self.context_with_remote_user, body_dict) # the token should have bind information in it bind = unscoped_token['access']['token']['bind'] self.assertEqual('FOO', bind['kerberos']) body_dict = _build_user_auth( token=unscoped_token['access']['token'], tenant_name='BAR') # using unscoped token without remote user context fails self.assertRaises( exception.Unauthorized, self.controller.authenticate, self.empty_context, body_dict) # using token with remote user context succeeds scoped_token = self.controller.authenticate( self.context_with_remote_user, body_dict) # the bind information should be carried over from the original token bind = scoped_token['access']['token']['bind'] self.assertEqual('FOO', bind['kerberos']) def test_deleting_role_revokes_token(self): role_controller = assignment.controllers.Role() project1 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project1['id'], project1) role_one = unit.new_role_ref(id='role_one') self.role_api.create_role(role_one['id'], role_one) self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], project1['id'], role_one['id']) no_context = {} # Get a scoped token for the tenant body_dict = _build_user_auth( username=self.user_foo['name'], password=self.user_foo['password'], tenant_name=project1['name']) token = self.controller.authenticate(no_context, body_dict) # Ensure it is valid token_id = token['access']['token']['id'] self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=token_id) # Delete the role, which should invalidate the token role_controller.delete_role( 
dict(is_admin=True, query_string={}), role_one['id']) # Check the token is now invalid self.assertRaises( exception.TokenNotFound, self.controller.validate_token, dict(is_admin=True, query_string={}), token_id=token_id) def test_deleting_role_assignment_does_not_revoke_unscoped_token(self): no_context = {} admin_context = dict(is_admin=True, query_string={}) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(project['id'], project) role = unit.new_role_ref() self.role_api.create_role(role['id'], role) self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], project['id'], role['id']) # Get an unscoped token. token = self.controller.authenticate(no_context, _build_user_auth( username=self.user_foo['name'], password=self.user_foo['password'])) token_id = token['access']['token']['id'] # Ensure it is valid self.controller.validate_token(admin_context, token_id=token_id) # Delete the role assignment, which should not invalidate the token, # because we're not consuming it with just an unscoped token. 
self.assignment_api.remove_role_from_user_and_project( self.user_foo['id'], project['id'], role['id']) # Ensure it is still valid self.controller.validate_token(admin_context, token_id=token_id) def test_only_original_audit_id_is_kept(self): context = {} def get_audit_ids(token): return token['access']['token']['audit_ids'] # get a token body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate(context, body_dict) starting_audit_id = get_audit_ids(unscoped_token)[0] self.assertIsNotNone(starting_audit_id) # get another token to ensure the correct parent audit_id is set body_dict = _build_user_auth(token=unscoped_token["access"]["token"]) unscoped_token_2 = self.controller.authenticate(context, body_dict) audit_ids = get_audit_ids(unscoped_token_2) self.assertThat(audit_ids, matchers.HasLength(2)) self.assertThat(audit_ids[-1], matchers.Equals(starting_audit_id)) # get another token from token 2 and ensure the correct parent # audit_id is set body_dict = _build_user_auth(token=unscoped_token_2["access"]["token"]) unscoped_token_3 = self.controller.authenticate(context, body_dict) audit_ids = get_audit_ids(unscoped_token_3) self.assertThat(audit_ids, matchers.HasLength(2)) self.assertThat(audit_ids[-1], matchers.Equals(starting_audit_id)) def test_revoke_by_audit_chain_id_original_token(self): self.config_fixture.config(group='token', revoke_by_id=False) context = {} # get a token body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate(context, body_dict) token_id = unscoped_token['access']['token']['id'] self.time_fixture.advance_time_seconds(1) # get a second token body_dict = _build_user_auth(token=unscoped_token["access"]["token"]) unscoped_token_2 = self.controller.authenticate(context, body_dict) token_2_id = unscoped_token_2['access']['token']['id'] self.time_fixture.advance_time_seconds(1) self.token_provider_api.revoke_token(token_id, revoke_chain=True) 
self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_id) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_2_id) def test_revoke_by_audit_chain_id_chained_token(self): self.config_fixture.config(group='token', revoke_by_id=False) context = {} # get a token body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate(context, body_dict) token_id = unscoped_token['access']['token']['id'] self.time_fixture.advance_time_seconds(1) # get a second token body_dict = _build_user_auth(token=unscoped_token["access"]["token"]) unscoped_token_2 = self.controller.authenticate(context, body_dict) token_2_id = unscoped_token_2['access']['token']['id'] self.time_fixture.advance_time_seconds(1) self.token_provider_api.revoke_token(token_2_id, revoke_chain=True) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_id) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_2_id) def _mock_audit_info(self, parent_audit_id): # NOTE(morgainfainberg): The token model and other cases that are # extracting the audit id expect 'None' if the audit id doesn't # exist. This ensures that the audit_id is None and the # audit_chain_id will also return None. 
return [None, None] def test_revoke_with_no_audit_info(self): self.config_fixture.config(group='token', revoke_by_id=False) context = {} with mock.patch.object(provider, 'audit_info', self._mock_audit_info): # get a token body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate(context, body_dict) token_id = unscoped_token['access']['token']['id'] self.time_fixture.advance_time_seconds(1) # get a second token body_dict = _build_user_auth( token=unscoped_token['access']['token']) unscoped_token_2 = self.controller.authenticate(context, body_dict) token_2_id = unscoped_token_2['access']['token']['id'] self.time_fixture.advance_time_seconds(1) self.token_provider_api.revoke_token(token_id, revoke_chain=True) self.time_fixture.advance_time_seconds(1) revoke_events = self.revoke_api.list_events() self.assertThat(revoke_events, matchers.HasLength(1)) revoke_event = revoke_events[0].to_dict() self.assertIn('expires_at', revoke_event) self.assertEqual(unscoped_token_2['access']['token']['expires'], revoke_event['expires_at']) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_id) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_2_id) # get a new token, with no audit info body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate(context, body_dict) token_id = unscoped_token['access']['token']['id'] self.time_fixture.advance_time_seconds(1) # get a second token body_dict = _build_user_auth( token=unscoped_token['access']['token']) unscoped_token_2 = self.controller.authenticate(context, body_dict) token_2_id = unscoped_token_2['access']['token']['id'] self.time_fixture.advance_time_seconds(1) # Revoke by audit_id, no audit_info means both parent and child # token are revoked. 
self.token_provider_api.revoke_token(token_id) self.time_fixture.advance_time_seconds(1) revoke_events = self.revoke_api.list_events() self.assertThat(revoke_events, matchers.HasLength(2)) revoke_event = revoke_events[1].to_dict() self.assertIn('expires_at', revoke_event) self.assertEqual(unscoped_token_2['access']['token']['expires'], revoke_event['expires_at']) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_id) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_2_id) class AuthWithPasswordCredentials(AuthTest): def test_auth_invalid_user(self): """Verify exception is raised if invalid user.""" body_dict = _build_user_auth( username=uuid.uuid4().hex, password=uuid.uuid4().hex) self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_valid_user_invalid_password(self): """Verify exception is raised if invalid password.""" body_dict = _build_user_auth( username="FOO", password=uuid.uuid4().hex) self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_empty_password(self): """Verify exception is raised if empty password.""" body_dict = _build_user_auth( username="FOO", password="") self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_no_password(self): """Verify exception is raised if empty password.""" body_dict = _build_user_auth(username="FOO") self.assertRaises( exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_authenticate_blank_password_credentials(self): """Sending empty dict as passwordCredentials raises 400 Bad Requset.""" body_dict = {'passwordCredentials': {}, 'tenantName': 'demo'} self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_authenticate_no_username(self): """Verify skipping username raises the right exception.""" body_dict = 
_build_user_auth(password="pass", tenant_name="demo") self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_bind_without_remote_user(self): self.config_fixture.config(group='token', bind=['kerberos']) body_dict = _build_user_auth(username='FOO', password='foo2', tenant_name='BAR') token = self.controller.authenticate({}, body_dict) self.assertNotIn('bind', token['access']['token']) def test_change_default_domain_id(self): # If the default_domain_id config option is not the default then the # user in auth data is from the new default domain. # 1) Create a new domain. new_domain = unit.new_domain_ref() new_domain_id = new_domain['id'] self.resource_api.create_domain(new_domain_id, new_domain) # 2) Create user "foo" in new domain with different password than # default-domain foo. new_user = unit.create_user(self.identity_api, name=self.user_foo['name'], domain_id=new_domain_id) # 3) Update the default_domain_id config option to the new domain self.config_fixture.config(group='identity', default_domain_id=new_domain_id) # 4) Authenticate as "foo" using the password in the new domain. body_dict = _build_user_auth( username=self.user_foo['name'], password=new_user['password']) # The test is successful if this doesn't raise, so no need to assert. 
self.controller.authenticate({}, body_dict) class AuthWithRemoteUser(AuthTest): def test_unscoped_remote_authn(self): """Verify getting an unscoped token with external authn.""" body_dict = _build_user_auth( username='FOO', password='foo2') local_token = self.controller.authenticate( {}, body_dict) body_dict = _build_user_auth() remote_token = self.controller.authenticate( self.context_with_remote_user, body_dict) self.assertEqualTokens(local_token, remote_token, enforce_audit_ids=False) def test_unscoped_remote_authn_jsonless(self): """Verify that external auth with invalid request fails.""" self.assertRaises( exception.ValidationError, self.controller.authenticate, {'REMOTE_USER': 'FOO'}, None) def test_scoped_remote_authn(self): """Verify getting a token with external authn.""" body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name='BAR') local_token = self.controller.authenticate( {}, body_dict) body_dict = _build_user_auth( tenant_name='BAR') remote_token = self.controller.authenticate( self.context_with_remote_user, body_dict) self.assertEqualTokens(local_token, remote_token, enforce_audit_ids=False) def test_scoped_nometa_remote_authn(self): """Verify getting a token with external authn and no metadata.""" body_dict = _build_user_auth( username='TWO', password='two2', tenant_name='BAZ') local_token = self.controller.authenticate( {}, body_dict) body_dict = _build_user_auth(tenant_name='BAZ') remote_token = self.controller.authenticate( {'environment': {'REMOTE_USER': 'TWO'}}, body_dict) self.assertEqualTokens(local_token, remote_token, enforce_audit_ids=False) def test_scoped_remote_authn_invalid_user(self): """Verify that external auth with invalid user fails.""" body_dict = _build_user_auth(tenant_name="BAR") self.assertRaises( exception.Unauthorized, self.controller.authenticate, {'environment': {'REMOTE_USER': uuid.uuid4().hex}}, body_dict) def test_bind_with_kerberos(self): self.config_fixture.config(group='token', bind=['kerberos']) 
body_dict = _build_user_auth(tenant_name="BAR") token = self.controller.authenticate(self.context_with_remote_user, body_dict) self.assertEqual('FOO', token['access']['token']['bind']['kerberos']) def test_bind_without_config_opt(self): self.config_fixture.config(group='token', bind=['x509']) body_dict = _build_user_auth(tenant_name='BAR') token = self.controller.authenticate(self.context_with_remote_user, body_dict) self.assertNotIn('bind', token['access']['token']) class AuthWithTrust(AuthTest): def setUp(self): super(AuthWithTrust, self).setUp() self.trust_controller = trust.controllers.TrustV3() self.auth_v3_controller = auth.controllers.Auth() self.trustor = self.user_foo self.trustee = self.user_two self.assigned_roles = [self.role_member['id'], self.role_browser['id']] for assigned_role in self.assigned_roles: self.assignment_api.add_role_to_user_and_project( self.trustor['id'], self.tenant_bar['id'], assigned_role) self.sample_data = {'trustor_user_id': self.trustor['id'], 'trustee_user_id': self.trustee['id'], 'project_id': self.tenant_bar['id'], 'impersonation': True, 'roles': [{'id': self.role_browser['id']}, {'name': self.role_member['name']}]} def config_overrides(self): super(AuthWithTrust, self).config_overrides() self.config_fixture.config(group='trust', enabled=True) def _create_auth_context(self, token_id): token_ref = token_model.KeystoneToken( token_id=token_id, token_data=self.token_provider_api.validate_token(token_id)) auth_context = authorization.token_to_auth_context(token_ref) # NOTE(gyee): if public_endpoint and admin_endpoint are not set, which # is the default, the base url will be constructed from the environment # variables wsgi.url_scheme, SERVER_NAME, SERVER_PORT, and SCRIPT_NAME. # We have to set them in the context so the base url can be constructed # accordingly. 
return {'environment': {authorization.AUTH_CONTEXT_ENV: auth_context, 'wsgi.url_scheme': 'http', 'SCRIPT_NAME': '/v3', 'SERVER_PORT': '80', 'SERVER_NAME': HOST}, 'token_id': token_id, 'host_url': HOST_URL} def create_trust(self, trust_data, trustor_name, expires_at=None, impersonation=True): username = trustor_name password = 'foo2' unscoped_token = self.get_unscoped_token(username, password) context = self._create_auth_context( unscoped_token['access']['token']['id']) trust_data_copy = copy.deepcopy(trust_data) trust_data_copy['expires_at'] = expires_at trust_data_copy['impersonation'] = impersonation return self.trust_controller.create_trust( context, trust=trust_data_copy)['trust'] def get_unscoped_token(self, username, password='foo2'): body_dict = _build_user_auth(username=username, password=password) return self.controller.authenticate({}, body_dict) def build_v2_token_request(self, username, password, trust, tenant_id=None): if not tenant_id: tenant_id = self.tenant_bar['id'] unscoped_token = self.get_unscoped_token(username, password) unscoped_token_id = unscoped_token['access']['token']['id'] request_body = _build_user_auth(token={'id': unscoped_token_id}, trust_id=trust['id'], tenant_id=tenant_id) return request_body def test_create_trust_bad_data_fails(self): unscoped_token = self.get_unscoped_token(self.trustor['name']) context = self._create_auth_context( unscoped_token['access']['token']['id']) bad_sample_data = {'trustor_user_id': self.trustor['id'], 'project_id': self.tenant_bar['id'], 'roles': [{'id': self.role_browser['id']}]} self.assertRaises(exception.ValidationError, self.trust_controller.create_trust, context, trust=bad_sample_data) def test_create_trust_no_roles(self): unscoped_token = self.get_unscoped_token(self.trustor['name']) context = {'token_id': unscoped_token['access']['token']['id']} self.sample_data['roles'] = [] self.assertRaises(exception.Forbidden, self.trust_controller.create_trust, context, trust=self.sample_data) def 
test_create_trust(self): expires_at = (timeutils.utcnow() + datetime.timedelta(minutes=10)).strftime(TIME_FORMAT) new_trust = self.create_trust(self.sample_data, self.trustor['name'], expires_at=expires_at) self.assertEqual(self.trustor['id'], new_trust['trustor_user_id']) self.assertEqual(self.trustee['id'], new_trust['trustee_user_id']) role_ids = [self.role_browser['id'], self.role_member['id']] self.assertTrue(timeutils.parse_strtime(new_trust['expires_at'], fmt=TIME_FORMAT)) self.assertIn('%s/v3/OS-TRUST/' % HOST_URL, new_trust['links']['self']) self.assertIn('%s/v3/OS-TRUST/' % HOST_URL, new_trust['roles_links']['self']) for role in new_trust['roles']: self.assertIn(role['id'], role_ids) def test_create_trust_expires_bad(self): self.assertRaises(exception.ValidationTimeStampError, self.create_trust, self.sample_data, self.trustor['name'], expires_at="bad") self.assertRaises(exception.ValidationTimeStampError, self.create_trust, self.sample_data, self.trustor['name'], expires_at="") self.assertRaises(exception.ValidationTimeStampError, self.create_trust, self.sample_data, self.trustor['name'], expires_at="Z") def test_create_trust_expires_older_than_now(self): self.assertRaises(exception.ValidationExpirationError, self.create_trust, self.sample_data, self.trustor['name'], expires_at="2010-06-04T08:44:31.999999Z") def test_create_trust_without_project_id(self): """Verify that trust can be created without project id. Also, token can be generated with that trust. 
""" unscoped_token = self.get_unscoped_token(self.trustor['name']) context = self._create_auth_context( unscoped_token['access']['token']['id']) self.sample_data['project_id'] = None self.sample_data['roles'] = [] new_trust = self.trust_controller.create_trust( context, trust=self.sample_data)['trust'] self.assertEqual(self.trustor['id'], new_trust['trustor_user_id']) self.assertEqual(self.trustee['id'], new_trust['trustee_user_id']) self.assertIs(new_trust['impersonation'], True) auth_response = self.fetch_v2_token_from_trust(new_trust) token_user = auth_response['access']['user'] self.assertEqual(token_user['id'], new_trust['trustor_user_id']) def test_get_trust(self): unscoped_token = self.get_unscoped_token(self.trustor['name']) context = self._create_auth_context( unscoped_token['access']['token']['id']) new_trust = self.trust_controller.create_trust( context, trust=self.sample_data)['trust'] trust = self.trust_controller.get_trust(context, new_trust['id'])['trust'] self.assertEqual(self.trustor['id'], trust['trustor_user_id']) self.assertEqual(self.trustee['id'], trust['trustee_user_id']) role_ids = [self.role_browser['id'], self.role_member['id']] for role in new_trust['roles']: self.assertIn(role['id'], role_ids) def test_get_trust_without_auth_context(self): """Verify a trust cannot be retrieved if auth context is missing.""" unscoped_token = self.get_unscoped_token(self.trustor['name']) context = self._create_auth_context( unscoped_token['access']['token']['id']) new_trust = self.trust_controller.create_trust( context, trust=self.sample_data)['trust'] # Delete the auth context before calling get_trust(). 
del context['environment'][authorization.AUTH_CONTEXT_ENV] self.assertRaises(exception.Forbidden, self.trust_controller.get_trust, context, new_trust['id']) def test_create_trust_no_impersonation(self): new_trust = self.create_trust(self.sample_data, self.trustor['name'], expires_at=None, impersonation=False) self.assertEqual(self.trustor['id'], new_trust['trustor_user_id']) self.assertEqual(self.trustee['id'], new_trust['trustee_user_id']) self.assertIs(new_trust['impersonation'], False) auth_response = self.fetch_v2_token_from_trust(new_trust) token_user = auth_response['access']['user'] self.assertEqual(token_user['id'], new_trust['trustee_user_id']) def test_create_trust_impersonation(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) self.assertEqual(self.trustor['id'], new_trust['trustor_user_id']) self.assertEqual(self.trustee['id'], new_trust['trustee_user_id']) self.assertIs(new_trust['impersonation'], True) auth_response = self.fetch_v2_token_from_trust(new_trust) token_user = auth_response['access']['user'] self.assertEqual(token_user['id'], new_trust['trustor_user_id']) def test_token_from_trust_wrong_user_fails(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) request_body = self.build_v2_token_request('FOO', 'foo2', new_trust) self.assertRaises(exception.Forbidden, self.controller.authenticate, {}, request_body) def test_token_from_trust_wrong_project_fails(self): for assigned_role in self.assigned_roles: self.assignment_api.add_role_to_user_and_project( self.trustor['id'], self.tenant_baz['id'], assigned_role) new_trust = self.create_trust(self.sample_data, self.trustor['name']) request_body = self.build_v2_token_request('TWO', 'two2', new_trust, self.tenant_baz['id']) self.assertRaises(exception.Forbidden, self.controller.authenticate, {}, request_body) def fetch_v2_token_from_trust(self, trust): request_body = self.build_v2_token_request('TWO', 'two2', trust) auth_response = 
self.controller.authenticate({}, request_body) return auth_response def fetch_v3_token_from_trust(self, trust, trustee): v3_password_data = { 'identity': { "methods": ["password"], "password": { "user": { "id": trustee["id"], "password": trustee["password"] } } }, 'scope': { 'project': { 'id': self.tenant_baz['id'] } } } auth_response = (self.auth_v3_controller.authenticate_for_token ({'environment': {}, 'query_string': {}}, v3_password_data)) token = auth_response.headers['X-Subject-Token'] v3_req_with_trust = { "identity": { "methods": ["token"], "token": {"id": token}}, "scope": { "OS-TRUST:trust": {"id": trust['id']}}} token_auth_response = (self.auth_v3_controller.authenticate_for_token ({'environment': {}, 'query_string': {}}, v3_req_with_trust)) return token_auth_response def test_create_v3_token_from_trust(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) auth_response = self.fetch_v3_token_from_trust(new_trust, self.trustee) trust_token_user = auth_response.json['token']['user'] self.assertEqual(self.trustor['id'], trust_token_user['id']) trust_token_trust = auth_response.json['token']['OS-TRUST:trust'] self.assertEqual(trust_token_trust['id'], new_trust['id']) self.assertEqual(self.trustor['id'], trust_token_trust['trustor_user']['id']) self.assertEqual(self.trustee['id'], trust_token_trust['trustee_user']['id']) trust_token_roles = auth_response.json['token']['roles'] self.assertEqual(2, len(trust_token_roles)) def test_v3_trust_token_get_token_fails(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) auth_response = self.fetch_v3_token_from_trust(new_trust, self.trustee) trust_token = auth_response.headers['X-Subject-Token'] v3_token_data = {'identity': { 'methods': ['token'], 'token': {'id': trust_token} }} self.assertRaises( exception.Forbidden, self.auth_v3_controller.authenticate_for_token, {'environment': {}, 'query_string': {}}, v3_token_data) def test_token_from_trust(self): new_trust = 
self.create_trust(self.sample_data, self.trustor['name']) auth_response = self.fetch_v2_token_from_trust(new_trust) self.assertIsNotNone(auth_response) self.assertEqual(2, len(auth_response['access']['metadata']['roles']), "user_foo has three roles, but the token should" " only get the two roles specified in the trust.") def assert_token_count_for_trust(self, trust, expected_value): tokens = self.token_provider_api._persistence._list_tokens( self.trustee['id'], trust_id=trust['id']) token_count = len(tokens) self.assertEqual(expected_value, token_count) def test_delete_tokens_for_user_invalidates_tokens_from_trust(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) self.assert_token_count_for_trust(new_trust, 0) self.fetch_v2_token_from_trust(new_trust) self.assert_token_count_for_trust(new_trust, 1) self.token_provider_api._persistence.delete_tokens_for_user( self.trustee['id']) self.assert_token_count_for_trust(new_trust, 0) def test_token_from_trust_cant_get_another_token(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) auth_response = self.fetch_v2_token_from_trust(new_trust) trust_token_id = auth_response['access']['token']['id'] request_body = _build_user_auth(token={'id': trust_token_id}, tenant_id=self.tenant_bar['id']) self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, request_body) def test_delete_trust_revokes_token(self): unscoped_token = self.get_unscoped_token(self.trustor['name']) new_trust = self.create_trust(self.sample_data, self.trustor['name']) context = self._create_auth_context( unscoped_token['access']['token']['id']) self.fetch_v2_token_from_trust(new_trust) trust_id = new_trust['id'] tokens = self.token_provider_api._persistence._list_tokens( self.trustor['id'], trust_id=trust_id) self.assertEqual(1, len(tokens)) self.trust_controller.delete_trust(context, trust_id=trust_id) tokens = self.token_provider_api._persistence._list_tokens( self.trustor['id'], 
trust_id=trust_id) self.assertEqual(0, len(tokens)) def test_token_from_trust_with_no_role_fails(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) for assigned_role in self.assigned_roles: self.assignment_api.remove_role_from_user_and_project( self.trustor['id'], self.tenant_bar['id'], assigned_role) request_body = self.build_v2_token_request('TWO', 'two2', new_trust) self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_expired_trust_get_token_fails(self): expires_at = (timeutils.utcnow() + datetime.timedelta(minutes=5)).strftime(TIME_FORMAT) time_expired = timeutils.utcnow() + datetime.timedelta(minutes=10) new_trust = self.create_trust(self.sample_data, self.trustor['name'], expires_at) with mock.patch.object(timeutils, 'utcnow') as mock_now: mock_now.return_value = time_expired request_body = self.build_v2_token_request('TWO', 'two2', new_trust) self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_token_from_trust_with_wrong_role_fails(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) self.assignment_api.add_role_to_user_and_project( self.trustor['id'], self.tenant_bar['id'], self.role_other['id']) for assigned_role in self.assigned_roles: self.assignment_api.remove_role_from_user_and_project( self.trustor['id'], self.tenant_bar['id'], assigned_role) request_body = self.build_v2_token_request('TWO', 'two2', new_trust) self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_do_not_consume_remaining_uses_when_get_token_fails(self): trust_data = copy.deepcopy(self.sample_data) trust_data['remaining_uses'] = 3 new_trust = self.create_trust(trust_data, self.trustor['name']) for assigned_role in self.assigned_roles: self.assignment_api.remove_role_from_user_and_project( self.trustor['id'], self.tenant_bar['id'], assigned_role) request_body = self.build_v2_token_request('TWO', 'two2', 
new_trust) self.assertRaises(exception.Forbidden, self.controller.authenticate, {}, request_body) unscoped_token = self.get_unscoped_token(self.trustor['name']) context = self._create_auth_context( unscoped_token['access']['token']['id']) trust = self.trust_controller.get_trust(context, new_trust['id'])['trust'] self.assertEqual(3, trust['remaining_uses']) def disable_user(self, user): user['enabled'] = False self.identity_api.update_user(user['id'], user) def test_trust_get_token_fails_if_trustor_disabled(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) request_body = self.build_v2_token_request(self.trustee['name'], self.trustee['password'], new_trust) self.disable_user(self.trustor) self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_trust_get_token_fails_if_trustee_disabled(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) request_body = self.build_v2_token_request(self.trustee['name'], self.trustee['password'], new_trust) self.disable_user(self.trustee) self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, request_body) class TokenExpirationTest(AuthTest): @mock.patch.object(timeutils, 'utcnow') def _maintain_token_expiration(self, mock_utcnow): """Token expiration should be maintained after re-auth & validation.""" now = datetime.datetime.utcnow() mock_utcnow.return_value = now r = self.controller.authenticate( {}, auth={ 'passwordCredentials': { 'username': self.user_foo['name'], 'password': self.user_foo['password'] } }) unscoped_token_id = r['access']['token']['id'] original_expiration = r['access']['token']['expires'] mock_utcnow.return_value = now + datetime.timedelta(seconds=1) r = self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=unscoped_token_id) self.assertEqual(original_expiration, r['access']['token']['expires']) mock_utcnow.return_value = now + datetime.timedelta(seconds=2) r = 
self.controller.authenticate( {}, auth={ 'token': { 'id': unscoped_token_id, }, 'tenantId': self.tenant_bar['id'], }) scoped_token_id = r['access']['token']['id'] self.assertEqual(original_expiration, r['access']['token']['expires']) mock_utcnow.return_value = now + datetime.timedelta(seconds=3) r = self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=scoped_token_id) self.assertEqual(original_expiration, r['access']['token']['expires']) def test_maintain_uuid_token_expiration(self): self.config_fixture.config(group='token', provider='uuid') self._maintain_token_expiration() class AuthCatalog(unit.SQLDriverOverrides, AuthTest): """Tests for the catalog provided in the auth response.""" def config_files(self): config_files = super(AuthCatalog, self).config_files() # We need to use a backend that supports disabled endpoints, like the # SQL backend. config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def _create_endpoints(self): def create_region(**kwargs): ref = unit.new_region_ref(**kwargs) self.catalog_api.create_region(ref) return ref def create_endpoint(service_id, region, **kwargs): endpoint = unit.new_endpoint_ref(region_id=region, service_id=service_id, **kwargs) self.catalog_api.create_endpoint(endpoint['id'], endpoint) return endpoint # Create a service for use with the endpoints. 
def create_service(**kwargs): ref = unit.new_service_ref(**kwargs) self.catalog_api.create_service(ref['id'], ref) return ref enabled_service_ref = create_service(enabled=True) disabled_service_ref = create_service(enabled=False) region = create_region() # Create endpoints enabled_endpoint_ref = create_endpoint( enabled_service_ref['id'], region['id']) create_endpoint( enabled_service_ref['id'], region['id'], enabled=False, interface='internal') create_endpoint( disabled_service_ref['id'], region['id']) return enabled_endpoint_ref def test_auth_catalog_disabled_endpoint(self): """On authenticate, get a catalog that excludes disabled endpoints.""" endpoint_ref = self._create_endpoints() # Authenticate body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") token = self.controller.authenticate({}, body_dict) # Check the catalog self.assertEqual(1, len(token['access']['serviceCatalog'])) endpoint = token['access']['serviceCatalog'][0]['endpoints'][0] self.assertEqual( 1, len(token['access']['serviceCatalog'][0]['endpoints'])) exp_endpoint = { 'id': endpoint_ref['id'], 'publicURL': endpoint_ref['url'], 'region': endpoint_ref['region_id'], } self.assertEqual(exp_endpoint, endpoint) def test_validate_catalog_disabled_endpoint(self): """On validate, get back a catalog that excludes disabled endpoints.""" endpoint_ref = self._create_endpoints() # Authenticate body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") token = self.controller.authenticate({}, body_dict) # Validate token_id = token['access']['token']['id'] validate_ref = self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=token_id) # Check the catalog self.assertEqual(1, len(token['access']['serviceCatalog'])) endpoint = validate_ref['access']['serviceCatalog'][0]['endpoints'][0] self.assertEqual( 1, len(token['access']['serviceCatalog'][0]['endpoints'])) exp_endpoint = { 'id': endpoint_ref['id'], 'publicURL': endpoint_ref['url'], 
'region': endpoint_ref['region_id'], } self.assertEqual(exp_endpoint, endpoint) class NonDefaultAuthTest(unit.TestCase): def test_add_non_default_auth_method(self): self.config_fixture.config(group='auth', methods=['password', 'token', 'custom']) config.setup_authentication() self.assertTrue(hasattr(CONF.auth, 'custom')) keystone-9.0.0/keystone/tests/unit/auth/0000775000567000056710000000000012701407246021450 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/auth/test_controllers.py0000664000567000056710000000727212701407102025426 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import mock from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_utils import importutils from oslotest import mockpatch import stevedore from stevedore import extension from keystone.auth import controllers from keystone.tests import unit class TestLoadAuthMethod(unit.BaseTestCase): def test_entrypoint_works(self): method = uuid.uuid4().hex plugin_name = self.getUniqueString() # Register the method using the given plugin cf = self.useFixture(config_fixture.Config()) cf.register_opt(cfg.StrOpt(method), group='auth') cf.config(group='auth', **{method: plugin_name}) # Setup stevedore.DriverManager to return a driver for the plugin extension_ = extension.Extension( plugin_name, entry_point=mock.sentinel.entry_point, plugin=mock.sentinel.plugin, obj=mock.sentinel.driver) auth_plugin_namespace = 'keystone.auth.%s' % method fake_driver_manager = stevedore.DriverManager.make_test_instance( extension_, namespace=auth_plugin_namespace) driver_manager_mock = self.useFixture(mockpatch.PatchObject( stevedore, 'DriverManager', return_value=fake_driver_manager)).mock driver = controllers.load_auth_method(method) self.assertEqual(auth_plugin_namespace, fake_driver_manager.namespace) driver_manager_mock.assert_called_once_with( auth_plugin_namespace, plugin_name, invoke_on_load=True) self.assertIs(driver, mock.sentinel.driver) def test_entrypoint_fails_import_works(self): method = uuid.uuid4().hex plugin_name = self.getUniqueString() # Register the method using the given plugin cf = self.useFixture(config_fixture.Config()) cf.register_opt(cfg.StrOpt(method), group='auth') cf.config(group='auth', **{method: plugin_name}) # stevedore.DriverManager raises RuntimeError if it can't load the # driver. 
self.useFixture(mockpatch.PatchObject( stevedore, 'DriverManager', side_effect=RuntimeError)) self.useFixture(mockpatch.PatchObject( importutils, 'import_object', return_value=mock.sentinel.driver)) driver = controllers.load_auth_method(method) self.assertIs(driver, mock.sentinel.driver) def test_entrypoint_fails_import_fails(self): method = uuid.uuid4().hex plugin_name = self.getUniqueString() # Register the method using the given plugin cf = self.useFixture(config_fixture.Config()) cf.register_opt(cfg.StrOpt(method), group='auth') cf.config(group='auth', **{method: plugin_name}) # stevedore.DriverManager raises RuntimeError if it can't load the # driver. self.useFixture(mockpatch.PatchObject( stevedore, 'DriverManager', side_effect=RuntimeError)) class TestException(Exception): pass self.useFixture(mockpatch.PatchObject( importutils, 'import_object', side_effect=TestException)) self.assertRaises(TestException, controllers.load_auth_method, method) keystone-9.0.0/keystone/tests/unit/auth/__init__.py0000664000567000056710000000000012701407102023536 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/ksfixtures/0000775000567000056710000000000012701407246022716 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/unit/ksfixtures/policy.py0000664000567000056710000000221212701407102024553 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import fixtures from oslo_policy import opts from keystone.policy.backends import rules class Policy(fixtures.Fixture): """A fixture for working with policy configuration.""" def __init__(self, policy_file, config_fixture): self._policy_file = policy_file self._config_fixture = config_fixture def setUp(self): super(Policy, self).setUp() opts.set_defaults(self._config_fixture.conf) self._config_fixture.config(group='oslo_policy', policy_file=self._policy_file) rules.init() self.addCleanup(rules.reset) keystone-9.0.0/keystone/tests/unit/ksfixtures/temporaryfile.py0000664000567000056710000000166212701407102026146 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import fixtures class SecureTempFile(fixtures.Fixture): """A fixture for creating a secure temp file.""" def setUp(self): super(SecureTempFile, self).setUp() _fd, self.file_name = tempfile.mkstemp() # Make sure no file descriptors are leaked, close the unused FD. os.close(_fd) self.addCleanup(os.remove, self.file_name) keystone-9.0.0/keystone/tests/unit/ksfixtures/key_repository.py0000664000567000056710000000212612701407102026347 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from keystone.token.providers.fernet import utils class KeyRepository(fixtures.Fixture): def __init__(self, config_fixture): super(KeyRepository, self).__init__() self.config_fixture = config_fixture def setUp(self): super(KeyRepository, self).setUp() directory = self.useFixture(fixtures.TempDir()).path self.config_fixture.config(group='fernet_tokens', key_repository=directory) utils.create_key_directory() utils.initialize_key_repository() keystone-9.0.0/keystone/tests/unit/ksfixtures/__init__.py0000664000567000056710000000150412701407102025016 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from keystone.tests.unit.ksfixtures.auth_plugins import ConfigAuthPlugins # noqa from keystone.tests.unit.ksfixtures.cache import Cache # noqa from keystone.tests.unit.ksfixtures.key_repository import KeyRepository # noqa from keystone.tests.unit.ksfixtures.policy import Policy # noqa keystone-9.0.0/keystone/tests/unit/ksfixtures/ldapdb.py0000664000567000056710000000236412701407102024512 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import fixtures from keystone.common import ldap as common_ldap from keystone.common.ldap import core as common_ldap_core from keystone.tests.unit import fakeldap class LDAPDatabase(fixtures.Fixture): """A fixture for setting up and tearing down an LDAP database.""" def setUp(self): super(LDAPDatabase, self).setUp() self.clear() common_ldap_core._HANDLERS.clear() common_ldap.register_handler('fake://', fakeldap.FakeLdap) # TODO(dstanek): switch the flow here self.addCleanup(self.clear) self.addCleanup(common_ldap_core._HANDLERS.clear) def clear(self): for shelf in fakeldap.FakeShelves: fakeldap.FakeShelves[shelf].clear() keystone-9.0.0/keystone/tests/unit/ksfixtures/appserver.py0000664000567000056710000000522612701407102025273 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import fixtures from oslo_config import cfg from paste import deploy from keystone.common import environment CONF = cfg.CONF MAIN = 'main' ADMIN = 'admin' class AppServer(fixtures.Fixture): """A fixture for managing an application server instance.""" def __init__(self, config, name, cert=None, key=None, ca=None, cert_required=False, host='127.0.0.1', port=0): super(AppServer, self).__init__() self.config = config self.name = name self.cert = cert self.key = key self.ca = ca self.cert_required = cert_required self.host = host self.port = port def setUp(self): super(AppServer, self).setUp() app = deploy.loadapp(self.config, name=self.name) self.server = environment.Server(app, self.host, self.port) self._setup_SSL_if_requested() self.server.start(key='socket') # some tests need to know the port we ran on. 
self.port = self.server.socket_info['socket'][1] self._update_config_opt() self.addCleanup(self.server.stop) def _setup_SSL_if_requested(self): # TODO(dstanek): fix environment.Server to take a SSLOpts instance # so that the params are either always set or not if (self.cert is not None and self.ca is not None and self.key is not None): self.server.set_ssl(certfile=self.cert, keyfile=self.key, ca_certs=self.ca, cert_required=self.cert_required) def _update_config_opt(self): """Updates the config with the actual port used.""" opt_name = self._get_config_option_for_section_name() CONF.set_override(opt_name, self.port, group='eventlet_server', enforce_type=True) def _get_config_option_for_section_name(self): """Maps Paster config section names to port option names.""" return {'admin': 'admin_port', 'main': 'public_port'}[self.name] keystone-9.0.0/keystone/tests/unit/ksfixtures/auth_plugins.py0000664000567000056710000000240712701407102025764 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import fixtures from keystone.common import config as common_cfg class ConfigAuthPlugins(fixtures.Fixture): """A fixture for setting up and tearing down a auth plugins.""" def __init__(self, config_fixture, methods, **method_classes): super(ConfigAuthPlugins, self).__init__() self.methods = methods self.config_fixture = config_fixture self.method_classes = method_classes def setUp(self): super(ConfigAuthPlugins, self).setUp() if self.methods: self.config_fixture.config(group='auth', methods=self.methods) common_cfg.setup_authentication() if self.method_classes: self.config_fixture.config(group='auth', **self.method_classes) keystone-9.0.0/keystone/tests/unit/ksfixtures/cache.py0000664000567000056710000000277612701407102024336 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import fixtures from keystone import catalog from keystone.common import cache CACHE_REGIONS = (cache.CACHE_REGION, catalog.COMPUTED_CATALOG_REGION) class Cache(fixtures.Fixture): """A fixture for setting up the cache between test cases. This will also tear down an existing cache if one is already configured. """ def setUp(self): super(Cache, self).setUp() # NOTE(dstanek): We must remove the existing cache backend in the # setUp instead of the tearDown because it defaults to a no-op cache # and we want the configure call below to create the correct backend. 
# NOTE(morganfainberg): The only way to reconfigure the CacheRegion # object on each setUp() call is to remove the .backend property. for region in CACHE_REGIONS: if region.is_configured: del region.backend # ensure the cache region instance is setup cache.configure_cache(region=region) keystone-9.0.0/keystone/tests/unit/ksfixtures/database.py0000664000567000056710000001415412701407105025033 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import os import fixtures from oslo_config import cfg from oslo_db import options as db_options from keystone.common import sql from keystone.tests import unit CONF = cfg.CONF def run_once(f): """A decorator to ensure the decorated function is only executed once. The decorated function is assumed to have a one parameter. """ @functools.wraps(f) def wrapper(one): if not wrapper.already_ran: f(one) wrapper.already_ran = True wrapper.already_ran = False return wrapper # NOTE(I159): Every execution all the options will be cleared. The method must # be called at the every fixture initialization. def initialize_sql_session(): # Make sure the DB is located in the correct location, in this case set # the default value, as this should be able to be overridden in some # test cases. db_options.set_defaults( CONF, connection=unit.IN_MEM_DB_CONN_STRING) @run_once def _load_sqlalchemy_models(version_specifiers): """Find all modules containing SQLAlchemy models and import them. 
This creates more consistent, deterministic test runs because tables for all core and extension models are always created in the test database. We ensure this by importing all modules that contain model definitions. The database schema during test runs is created using reflection. Reflection is simply SQLAlchemy taking the model definitions for all models currently imported and making tables for each of them. The database schema created during test runs may vary between tests as more models are imported. Importing all models at the start of the test run avoids this problem. version_specifiers is a dict that contains any specific driver versions that have been requested. The dict is of the form: { : {'versioned_backend' : , 'versionless_backend' : } } For example: {'keystone.assignment': {'versioned_backend' : 'V8_backends', 'versionless_backend' : 'backends'}, 'keystone.identity': {'versioned_backend' : 'V9_backends', 'versionless_backend' : 'backends'} } The version_specifiers will be used to load the correct driver. The algorithm for this assumes that versioned drivers begin in 'V'. """ keystone_root = os.path.normpath(os.path.join( os.path.dirname(__file__), '..', '..', '..')) for root, dirs, files in os.walk(keystone_root): # NOTE(morganfainberg): Slice the keystone_root off the root to ensure # we do not end up with a module name like: # Users.home.openstack.keystone.assignment.backends.sql root = root[len(keystone_root):] if root.endswith('backends') and 'sql.py' in files: # The root will be prefixed with an instance of os.sep, which will # make the root after replacement '.', the 'keystone' part # of the module path is always added to the front module_root = ('keystone.%s' % root.replace(os.sep, '.').lstrip('.')) module_components = module_root.split('.') module_without_backends = '' for x in range(0, len(module_components) - 1): module_without_backends += module_components[x] + '.' 
module_without_backends = module_without_backends.rstrip('.') this_backend = module_components[len(module_components) - 1] # At this point module_without_backends might be something like # 'keystone.assignment', while this_backend might be something # 'V8_backends'. if module_without_backends.startswith('keystone.contrib'): # All the sql modules have now been moved into the core tree # so no point in loading these again here (and, in fact, doing # so might break trying to load a versioned driver. continue if module_without_backends in version_specifiers: # OK, so there is a request for a specific version of this one. # We therefore should skip any other versioned backend as well # as the non-versioned one. version = version_specifiers[module_without_backends] if ((this_backend != version['versioned_backend'] and this_backend.startswith('V')) or this_backend == version['versionless_backend']): continue else: # No versioned driver requested, so ignore any that are # versioned if this_backend.startswith('V'): continue module_name = module_root + '.sql' __import__(module_name) class Database(fixtures.Fixture): """A fixture for setting up and tearing down a database.""" def __init__(self, version_specifiers=None): super(Database, self).__init__() initialize_sql_session() if version_specifiers is None: version_specifiers = {} _load_sqlalchemy_models(version_specifiers) def setUp(self): super(Database, self).setUp() with sql.session_for_write() as session: self.engine = session.get_bind() self.addCleanup(sql.cleanup) sql.ModelBase.metadata.create_all(bind=self.engine) self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine) def recreate(self): sql.ModelBase.metadata.create_all(bind=self.engine) keystone-9.0.0/keystone/tests/unit/ksfixtures/hacking.py0000664000567000056710000003061512701407102024670 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(morganfainberg) This file shouldn't have flake8 run on it as it has # code examples that will fail normal CI pep8/flake8 tests. This is expected. # The code has been moved here to ensure that proper tests occur on the # test_hacking_checks test cases. # flake8: noqa import fixtures class HackingCode(fixtures.Fixture): """A fixture to house the various code examples for the keystone hacking style checks. """ mutable_default_args = { 'code': """ def f(): pass def f(a, b='', c=None): pass def f(bad=[]): pass def f(foo, bad=[], more_bad=[x for x in range(3)]): pass def f(foo, bad={}): pass def f(foo, bad={}, another_bad=[], fine=None): pass def f(bad=[]): # noqa pass def funcs(bad=dict(), more_bad=list(), even_more_bad=set()): "creating mutables through builtins" def funcs(bad=something(), more_bad=some_object.something()): "defaults from any functions" def f(bad=set(), more_bad={x for x in range(3)}, even_more_bad={1, 2, 3}): "set and set comprehession" def f(bad={x: x for x in range(3)}): "dict comprehension" """, 'expected_errors': [ (7, 10, 'K001'), (10, 15, 'K001'), (10, 29, 'K001'), (13, 15, 'K001'), (16, 15, 'K001'), (16, 31, 'K001'), (22, 14, 'K001'), (22, 31, 'K001'), (22, 53, 'K001'), (25, 14, 'K001'), (25, 36, 'K001'), (28, 10, 'K001'), (28, 27, 'K001'), (29, 21, 'K001'), (32, 11, 'K001'), ]} comments_begin_with_space = { 'code': """ # This is a good comment #This is a bad one # This is alright and can # be continued with extra indentation # if that's what the developer wants. 
""", 'expected_errors': [ (3, 0, 'K002'), ]} asserting_none_equality = { 'code': """ class Test(object): def test(self): self.assertEqual('', '') self.assertEqual('', None) self.assertEqual(None, '') self.assertNotEqual('', None) self.assertNotEqual(None, '') self.assertNotEqual('', None) # noqa self.assertNotEqual(None, '') # noqa """, 'expected_errors': [ (5, 8, 'K003'), (6, 8, 'K003'), (7, 8, 'K004'), (8, 8, 'K004'), ]} dict_constructor = { 'code': """ lower_res = {k.lower(): v for k, v in six.iteritems(res[1])} fool = dict(a='a', b='b') lower_res = dict((k.lower(), v) for k, v in six.iteritems(res[1])) attrs = dict([(k, _from_json(v))]) dict([[i,i] for i in range(3)]) dict(({1:2})) """, 'expected_errors': [ (3, 0, 'K008'), (4, 0, 'K008'), (5, 0, 'K008'), ]} class HackingLogging(fixtures.Fixture): shared_imports = """ import logging import logging as stlib_logging from keystone.i18n import _ from keystone.i18n import _ as oslo_i18n from keystone.i18n import _LC from keystone.i18n import _LE from keystone.i18n import _LE as error_hint from keystone.i18n import _LI from keystone.i18n import _LW from oslo_log import log from oslo_log import log as oslo_logging """ examples = [ { 'code': """ # stdlib logging LOG = logging.getLogger() LOG.info(_('text')) class C: def __init__(self): LOG.warning(oslo_i18n('text', {})) LOG.warning(_LW('text', {})) """, 'expected_errors': [ (3, 9, 'K006'), (6, 20, 'K006'), ], }, { 'code': """ # stdlib logging w/ alias and specifying a logger class C: def __init__(self): self.L = logging.getLogger(__name__) def m(self): self.L.warning( _('text'), {} ) self.L.warning( _LW('text'), {} ) """, 'expected_errors': [ (7, 12, 'K006'), ], }, { 'code': """ # oslo logging and specifying a logger L = log.getLogger(__name__) L.error(oslo_i18n('text')) L.error(error_hint('text')) """, 'expected_errors': [ (3, 8, 'K006'), ], }, { 'code': """ # oslo logging w/ alias class C: def __init__(self): self.LOG = oslo_logging.getLogger() 
self.LOG.critical(_('text')) self.LOG.critical(_LC('text')) """, 'expected_errors': [ (5, 26, 'K006'), ], }, { 'code': """ LOG = log.getLogger(__name__) # translation on a separate line msg = _('text') LOG.exception(msg) msg = _LE('text') LOG.exception(msg) """, 'expected_errors': [ (4, 14, 'K006'), ], }, { 'code': """ LOG = logging.getLogger() # ensure the correct helper is being used LOG.warning(_LI('this should cause an error')) # debug should not allow any helpers either LOG.debug(_LI('this should cause an error')) """, 'expected_errors': [ (4, 12, 'K006'), (7, 10, 'K005'), ], }, { 'code': """ # this should not be an error L = log.getLogger(__name__) msg = _('text') L.warning(msg) raise Exception(msg) """, 'expected_errors': [], }, { 'code': """ L = log.getLogger(__name__) def f(): msg = _('text') L2.warning(msg) something = True # add an extra statement here raise Exception(msg) """, 'expected_errors': [], }, { 'code': """ LOG = log.getLogger(__name__) def func(): msg = _('text') LOG.warning(msg) raise Exception('some other message') """, 'expected_errors': [ (4, 16, 'K006'), ], }, { 'code': """ LOG = log.getLogger(__name__) if True: msg = _('text') else: msg = _('text') LOG.warning(msg) raise Exception(msg) """, 'expected_errors': [ ], }, { 'code': """ LOG = log.getLogger(__name__) if True: msg = _('text') else: msg = _('text') LOG.warning(msg) """, 'expected_errors': [ (6, 12, 'K006'), ], }, { 'code': """ LOG = log.getLogger(__name__) msg = _LW('text') LOG.warning(msg) raise Exception(msg) """, 'expected_errors': [ (3, 12, 'K007'), ], }, { 'code': """ LOG = log.getLogger(__name__) msg = _LW('text') LOG.warning(msg) msg = _('something else') raise Exception(msg) """, 'expected_errors': [], }, { 'code': """ LOG = log.getLogger(__name__) msg = _LW('hello %s') % 'world' LOG.warning(msg) raise Exception(msg) """, 'expected_errors': [ (3, 12, 'K007'), ], }, { 'code': """ LOG = log.getLogger(__name__) msg = _LW('hello %s') % 'world' LOG.warning(msg) """, 
'expected_errors': [], }, { 'code': """ # this should not be an error LOG = log.getLogger(__name__) try: something = True except AssertionError as e: LOG.warning(six.text_type(e)) raise exception.Unauthorized(e) """, 'expected_errors': [], }, ] assert_not_using_deprecated_warn = { 'code': """ # Logger.warn has been deprecated in Python3 in favor of # Logger.warning LOG = log.getLogger(__name__) LOG.warn(_LW('text')) """, 'expected_errors': [ (4, 9, 'K009'), ], } assert_no_translations_for_debug_logging = { 'code': """ # stdlib logging L0 = logging.getLogger() L0.debug(_('text')) class C: def __init__(self): L0.debug(oslo_i18n('text', {})) # stdlib logging w/ alias and specifying a logger class C: def __init__(self): self.L1 = logging.getLogger(__name__) def m(self): self.L1.debug( _('text'), {} ) # oslo logging and specifying a logger L2 = logging.getLogger(__name__) L2.debug(oslo_i18n('text')) # oslo logging w/ alias class C: def __init__(self): self.L3 = oslo_logging.getLogger() self.L3.debug(_('text')) # translation on a separate line msg = _('text') L2.debug(msg) # this should not fail if True: msg = _('message %s') % X L2.error(msg) raise TypeError(msg) if True: msg = 'message' L2.debug(msg) # this should not fail if True: if True: msg = _('message') else: msg = _('message') L2.debug(msg) raise Exception(msg) """, 'expected_errors': [ (3, 9, 'K005'), (6, 17, 'K005'), (14, 12, 'K005'), (19, 9, 'K005'), (25, 22, 'K005'), (29, 9, 'K005'), ] } keystone-9.0.0/keystone/tests/unit/test_exception.py0000664000567000056710000002502112701407102024105 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_serialization import jsonutils import six from keystone.common import wsgi from keystone import exception from keystone.tests import unit class ExceptionTestCase(unit.BaseTestCase): def assertValidJsonRendering(self, e): resp = wsgi.render_exception(e) self.assertEqual(e.code, resp.status_int) self.assertEqual('%s %s' % (e.code, e.title), resp.status) j = jsonutils.loads(resp.body) self.assertIsNotNone(j.get('error')) self.assertIsNotNone(j['error'].get('code')) self.assertIsNotNone(j['error'].get('title')) self.assertIsNotNone(j['error'].get('message')) self.assertNotIn('\n', j['error']['message']) self.assertNotIn(' ', j['error']['message']) self.assertTrue(type(j['error']['code']) is int) def test_all_json_renderings(self): """Everything callable in the exception module should be renderable. ... except for the base error class (exception.Error), which is not user-facing. This test provides a custom message to bypass docstring parsing, which should be tested separately. 
""" for cls in [x for x in exception.__dict__.values() if callable(x)]: if cls is not exception.Error and isinstance(cls, exception.Error): self.assertValidJsonRendering(cls(message='Overridden.')) def test_validation_error(self): target = uuid.uuid4().hex attribute = uuid.uuid4().hex e = exception.ValidationError(target=target, attribute=attribute) self.assertValidJsonRendering(e) self.assertIn(target, six.text_type(e)) self.assertIn(attribute, six.text_type(e)) def test_not_found(self): target = uuid.uuid4().hex e = exception.NotFound(target=target) self.assertValidJsonRendering(e) self.assertIn(target, six.text_type(e)) def test_forbidden_title(self): e = exception.Forbidden() resp = wsgi.render_exception(e) j = jsonutils.loads(resp.body) self.assertEqual('Forbidden', e.title) self.assertEqual('Forbidden', j['error'].get('title')) def test_unicode_message(self): message = u'Comment \xe7a va' e = exception.Error(message) try: self.assertEqual(message, six.text_type(e)) except UnicodeEncodeError: self.fail("unicode error message not supported") def test_unicode_string(self): e = exception.ValidationError(attribute='xx', target='Long \xe2\x80\x93 Dash') if six.PY2: self.assertIn(u'\u2013', six.text_type(e)) else: self.assertIn('Long \xe2\x80\x93 Dash', six.text_type(e)) def test_invalid_unicode_string(self): # NOTE(jamielennox): This is a complete failure case so what is # returned in the exception message is not that important so long # as there is an error with a message e = exception.ValidationError(attribute='xx', target='\xe7a va') if six.PY2: self.assertIn('%(attribute)', six.text_type(e)) else: # There's no UnicodeDecodeError on python 3. 
self.assertIn('\xe7a va', six.text_type(e)) class UnexpectedExceptionTestCase(ExceptionTestCase): """Tests if internal info is exposed to the API user on UnexpectedError.""" class SubClassExc(exception.UnexpectedError): debug_message_format = 'Debug Message: %(debug_info)s' def setUp(self): super(UnexpectedExceptionTestCase, self).setUp() self.exc_str = uuid.uuid4().hex self.config_fixture = self.useFixture(config_fixture.Config(cfg.CONF)) def test_unexpected_error_no_debug(self): self.config_fixture.config(debug=False) e = exception.UnexpectedError(exception=self.exc_str) self.assertNotIn(self.exc_str, six.text_type(e)) def test_unexpected_error_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) e = exception.UnexpectedError(exception=self.exc_str) self.assertIn(self.exc_str, six.text_type(e)) def test_unexpected_error_subclass_no_debug(self): self.config_fixture.config(debug=False) e = UnexpectedExceptionTestCase.SubClassExc( debug_info=self.exc_str) self.assertEqual(exception.UnexpectedError.message_format, six.text_type(e)) def test_unexpected_error_subclass_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) subclass = self.SubClassExc e = subclass(debug_info=self.exc_str) expected = subclass.debug_message_format % {'debug_info': self.exc_str} self.assertEqual( '%s %s' % (expected, exception.SecurityError.amendment), six.text_type(e)) def test_unexpected_error_custom_message_no_debug(self): self.config_fixture.config(debug=False) e = exception.UnexpectedError(self.exc_str) self.assertEqual(exception.UnexpectedError.message_format, six.text_type(e)) def test_unexpected_error_custom_message_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) e = exception.UnexpectedError(self.exc_str) self.assertEqual( '%s %s' % (self.exc_str, exception.SecurityError.amendment), six.text_type(e)) def test_unexpected_error_custom_message_exception_debug(self): self.config_fixture.config(debug=True, 
insecure_debug=True) orig_e = exception.NotFound(target=uuid.uuid4().hex) e = exception.UnexpectedError(orig_e) self.assertEqual( '%s %s' % (six.text_type(orig_e), exception.SecurityError.amendment), six.text_type(e)) def test_unexpected_error_custom_message_binary_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) binary_msg = b'something' e = exception.UnexpectedError(binary_msg) self.assertEqual( '%s %s' % (six.text_type(binary_msg), exception.SecurityError.amendment), six.text_type(e)) class SecurityErrorTestCase(ExceptionTestCase): """Tests whether security-related info is exposed to the API user.""" def setUp(self): super(SecurityErrorTestCase, self).setUp() self.config_fixture = self.useFixture(config_fixture.Config(cfg.CONF)) def test_unauthorized_exposure(self): self.config_fixture.config(debug=False) risky_info = uuid.uuid4().hex e = exception.Unauthorized(message=risky_info) self.assertValidJsonRendering(e) self.assertNotIn(risky_info, six.text_type(e)) def test_unauthorized_exposure_in_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) risky_info = uuid.uuid4().hex e = exception.Unauthorized(message=risky_info) self.assertValidJsonRendering(e) self.assertIn(risky_info, six.text_type(e)) def test_forbidden_exposure(self): self.config_fixture.config(debug=False) risky_info = uuid.uuid4().hex e = exception.Forbidden(message=risky_info) self.assertValidJsonRendering(e) self.assertNotIn(risky_info, six.text_type(e)) def test_forbidden_exposure_in_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) risky_info = uuid.uuid4().hex e = exception.Forbidden(message=risky_info) self.assertValidJsonRendering(e) self.assertIn(risky_info, six.text_type(e)) def test_forbidden_action_exposure(self): self.config_fixture.config(debug=False) risky_info = uuid.uuid4().hex action = uuid.uuid4().hex e = exception.ForbiddenAction(message=risky_info, action=action) self.assertValidJsonRendering(e) 
self.assertNotIn(risky_info, six.text_type(e)) self.assertIn(action, six.text_type(e)) self.assertNotIn(exception.SecurityError.amendment, six.text_type(e)) e = exception.ForbiddenAction(action=action) self.assertValidJsonRendering(e) self.assertIn(action, six.text_type(e)) self.assertNotIn(exception.SecurityError.amendment, six.text_type(e)) def test_forbidden_action_exposure_in_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) risky_info = uuid.uuid4().hex action = uuid.uuid4().hex e = exception.ForbiddenAction(message=risky_info, action=action) self.assertValidJsonRendering(e) self.assertIn(risky_info, six.text_type(e)) self.assertIn(exception.SecurityError.amendment, six.text_type(e)) e = exception.ForbiddenAction(action=action) self.assertValidJsonRendering(e) self.assertIn(action, six.text_type(e)) self.assertNotIn(exception.SecurityError.amendment, six.text_type(e)) def test_forbidden_action_no_message(self): # When no custom message is given when the ForbiddenAction (or other # SecurityError subclass) is created the exposed message is the same # whether debug is enabled or not. 
action = uuid.uuid4().hex self.config_fixture.config(debug=False) e = exception.ForbiddenAction(action=action) exposed_message = six.text_type(e) self.assertIn(action, exposed_message) self.assertNotIn(exception.SecurityError.amendment, six.text_type(e)) self.config_fixture.config(debug=True) e = exception.ForbiddenAction(action=action) self.assertEqual(exposed_message, six.text_type(e)) def test_unicode_argument_message(self): self.config_fixture.config(debug=False) risky_info = u'\u7ee7\u7eed\u884c\u7f29\u8fdb\u6216' e = exception.Forbidden(message=risky_info) self.assertValidJsonRendering(e) self.assertNotIn(risky_info, six.text_type(e)) keystone-9.0.0/keystone/tests/unit/test_v3_credential.py0000664000567000056710000004756012701407102024645 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import hashlib import json import uuid from keystoneclient.contrib.ec2 import utils as ec2_utils from oslo_config import cfg from six.moves import http_client from testtools import matchers from keystone.common import utils from keystone.contrib.ec2 import controllers from keystone import exception from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = cfg.CONF CRED_TYPE_EC2 = controllers.CRED_TYPE_EC2 class CredentialBaseTestCase(test_v3.RestfulTestCase): def _create_dict_blob_credential(self): blob, credential = unit.new_ec2_credential(user_id=self.user['id'], project_id=self.project_id) # Store the blob as a dict *not* JSON ref bug #1259584 # This means we can test the dict->json workaround, added # as part of the bugfix for backwards compatibility works. credential['blob'] = blob credential_id = credential['id'] # Create direct via the DB API to avoid validation failure self.credential_api.create_credential(credential_id, credential) return json.dumps(blob), credential_id class CredentialTestCase(CredentialBaseTestCase): """Test credential CRUD.""" def setUp(self): super(CredentialTestCase, self).setUp() self.credential = unit.new_credential_ref(user_id=self.user['id'], project_id=self.project_id) self.credential_api.create_credential( self.credential['id'], self.credential) def test_credential_api_delete_credentials_for_project(self): self.credential_api.delete_credentials_for_project(self.project_id) # Test that the credential that we created in .setUp no longer exists # once we delete all credentials for self.project_id self.assertRaises(exception.CredentialNotFound, self.credential_api.get_credential, credential_id=self.credential['id']) def test_credential_api_delete_credentials_for_user(self): self.credential_api.delete_credentials_for_user(self.user_id) # Test that the credential that we created in .setUp no longer exists # once we delete all credentials for self.user_id self.assertRaises(exception.CredentialNotFound, 
self.credential_api.get_credential, credential_id=self.credential['id']) def test_list_credentials(self): """Call ``GET /credentials``.""" r = self.get('/credentials') self.assertValidCredentialListResponse(r, ref=self.credential) def test_list_credentials_filtered_by_user_id(self): """Call ``GET /credentials?user_id={user_id}``.""" credential = unit.new_credential_ref(user_id=uuid.uuid4().hex) self.credential_api.create_credential(credential['id'], credential) r = self.get('/credentials?user_id=%s' % self.user['id']) self.assertValidCredentialListResponse(r, ref=self.credential) for cred in r.result['credentials']: self.assertEqual(self.user['id'], cred['user_id']) def test_list_credentials_filtered_by_type(self): """Call ``GET /credentials?type={type}``.""" # The type ec2 was chosen, instead of a random string, # because the type must be in the list of supported types ec2_credential = unit.new_credential_ref(user_id=uuid.uuid4().hex, project_id=self.project_id, type=CRED_TYPE_EC2) ec2_resp = self.credential_api.create_credential( ec2_credential['id'], ec2_credential) # The type cert was chosen for the same reason as ec2 r = self.get('/credentials?type=cert') # Testing the filter for two different types self.assertValidCredentialListResponse(r, ref=self.credential) for cred in r.result['credentials']: self.assertEqual('cert', cred['type']) r_ec2 = self.get('/credentials?type=ec2') self.assertThat(r_ec2.result['credentials'], matchers.HasLength(1)) cred_ec2 = r_ec2.result['credentials'][0] self.assertValidCredentialListResponse(r_ec2, ref=ec2_resp) self.assertEqual(CRED_TYPE_EC2, cred_ec2['type']) self.assertEqual(ec2_credential['id'], cred_ec2['id']) def test_list_credentials_filtered_by_type_and_user_id(self): """Call ``GET /credentials?user_id={user_id}&type={type}``.""" user1_id = uuid.uuid4().hex user2_id = uuid.uuid4().hex # Creating credentials for two different users credential_user1_ec2 = unit.new_credential_ref(user_id=user1_id, type=CRED_TYPE_EC2) 
credential_user1_cert = unit.new_credential_ref(user_id=user1_id) credential_user2_cert = unit.new_credential_ref(user_id=user2_id) self.credential_api.create_credential( credential_user1_ec2['id'], credential_user1_ec2) self.credential_api.create_credential( credential_user1_cert['id'], credential_user1_cert) self.credential_api.create_credential( credential_user2_cert['id'], credential_user2_cert) r = self.get('/credentials?user_id=%s&type=ec2' % user1_id) self.assertValidCredentialListResponse(r, ref=credential_user1_ec2) self.assertThat(r.result['credentials'], matchers.HasLength(1)) cred = r.result['credentials'][0] self.assertEqual(CRED_TYPE_EC2, cred['type']) self.assertEqual(user1_id, cred['user_id']) def test_create_credential(self): """Call ``POST /credentials``.""" ref = unit.new_credential_ref(user_id=self.user['id']) r = self.post( '/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) def test_get_credential(self): """Call ``GET /credentials/{credential_id}``.""" r = self.get( '/credentials/%(credential_id)s' % { 'credential_id': self.credential['id']}) self.assertValidCredentialResponse(r, self.credential) def test_update_credential(self): """Call ``PATCH /credentials/{credential_id}``.""" ref = unit.new_credential_ref(user_id=self.user['id'], project_id=self.project_id) del ref['id'] r = self.patch( '/credentials/%(credential_id)s' % { 'credential_id': self.credential['id']}, body={'credential': ref}) self.assertValidCredentialResponse(r, ref) def test_delete_credential(self): """Call ``DELETE /credentials/{credential_id}``.""" self.delete( '/credentials/%(credential_id)s' % { 'credential_id': self.credential['id']}) def test_create_ec2_credential(self): """Call ``POST /credentials`` for creating ec2 credential.""" blob, ref = unit.new_ec2_credential(user_id=self.user['id'], project_id=self.project_id) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) # Assert credential 
id is same as hash of access key id for # ec2 credentials access = blob['access'].encode('utf-8') self.assertEqual(hashlib.sha256(access).hexdigest(), r.result['credential']['id']) # Create second ec2 credential with the same access key id and check # for conflict. self.post( '/credentials', body={'credential': ref}, expected_status=http_client.CONFLICT) def test_get_ec2_dict_blob(self): """Ensure non-JSON blob data is correctly converted.""" expected_blob, credential_id = self._create_dict_blob_credential() r = self.get( '/credentials/%(credential_id)s' % { 'credential_id': credential_id}) # use json.loads to transform the blobs back into Python dictionaries # to avoid problems with the keys being in different orders. self.assertEqual(json.loads(expected_blob), json.loads(r.result['credential']['blob'])) def test_list_ec2_dict_blob(self): """Ensure non-JSON blob data is correctly converted.""" expected_blob, credential_id = self._create_dict_blob_credential() list_r = self.get('/credentials') list_creds = list_r.result['credentials'] list_ids = [r['id'] for r in list_creds] self.assertIn(credential_id, list_ids) # use json.loads to transform the blobs back into Python dictionaries # to avoid problems with the keys being in different orders. for r in list_creds: if r['id'] == credential_id: self.assertEqual(json.loads(expected_blob), json.loads(r['blob'])) def test_create_non_ec2_credential(self): """Test creating non-ec2 credential. Call ``POST /credentials``. """ blob, ref = unit.new_cert_credential(user_id=self.user['id']) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) # Assert credential id is not same as hash of access key id for # non-ec2 credentials access = blob['access'].encode('utf-8') self.assertNotEqual(hashlib.sha256(access).hexdigest(), r.result['credential']['id']) def test_create_ec2_credential_with_missing_project_id(self): """Test Creating ec2 credential with missing project_id. 
Call ``POST /credentials``. """ _, ref = unit.new_ec2_credential(user_id=self.user['id'], project_id=None) # Assert bad request status when missing project_id self.post( '/credentials', body={'credential': ref}, expected_status=http_client.BAD_REQUEST) def test_create_ec2_credential_with_invalid_blob(self): """Test creating ec2 credential with invalid blob. Call ``POST /credentials``. """ ref = unit.new_credential_ref(user_id=self.user['id'], project_id=self.project_id, blob='{"abc":"def"d}', type=CRED_TYPE_EC2) # Assert bad request status when request contains invalid blob response = self.post( '/credentials', body={'credential': ref}, expected_status=http_client.BAD_REQUEST) self.assertValidErrorResponse(response) def test_create_credential_with_admin_token(self): # Make sure we can create credential with the static admin token ref = unit.new_credential_ref(user_id=self.user['id']) r = self.post( '/credentials', body={'credential': ref}, token=self.get_admin_token()) self.assertValidCredentialResponse(r, ref) class TestCredentialTrustScoped(test_v3.RestfulTestCase): """Test credential with trust scoped token.""" def setUp(self): super(TestCredentialTrustScoped, self).setUp() self.trustee_user = unit.new_user_ref(domain_id=self.domain_id) password = self.trustee_user['password'] self.trustee_user = self.identity_api.create_user(self.trustee_user) self.trustee_user['password'] = password self.trustee_user_id = self.trustee_user['id'] def config_overrides(self): super(TestCredentialTrustScoped, self).config_overrides() self.config_fixture.config(group='trust', enabled=True) def test_trust_scoped_ec2_credential(self): """Test creating trust scoped ec2 credential. Call ``POST /credentials``. 
""" # Create the trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id]) del ref['id'] r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) # Get a trust scoped token auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id']) r = self.v3_create_token(auth_data) self.assertValidProjectScopedTokenResponse(r, self.user) trust_id = r.result['token']['OS-TRUST:trust']['id'] token_id = r.headers.get('X-Subject-Token') # Create the credential with the trust scoped token blob, ref = unit.new_ec2_credential(user_id=self.user['id'], project_id=self.project_id) r = self.post('/credentials', body={'credential': ref}, token=token_id) # We expect the response blob to contain the trust_id ret_ref = ref.copy() ret_blob = blob.copy() ret_blob['trust_id'] = trust_id ret_ref['blob'] = json.dumps(ret_blob) self.assertValidCredentialResponse(r, ref=ret_ref) # Assert credential id is same as hash of access key id for # ec2 credentials access = blob['access'].encode('utf-8') self.assertEqual(hashlib.sha256(access).hexdigest(), r.result['credential']['id']) # Create second ec2 credential with the same access key id and check # for conflict. 
self.post( '/credentials', body={'credential': ref}, token=token_id, expected_status=http_client.CONFLICT) class TestCredentialEc2(CredentialBaseTestCase): """Test v3 credential compatibility with ec2tokens.""" def setUp(self): super(TestCredentialEc2, self).setUp() def _validate_signature(self, access, secret): """Test signature validation with the access/secret provided.""" signer = ec2_utils.Ec2Signer(secret) params = {'SignatureMethod': 'HmacSHA256', 'SignatureVersion': '2', 'AWSAccessKeyId': access} request = {'host': 'foo', 'verb': 'GET', 'path': '/bar', 'params': params} signature = signer.generate(request) # Now make a request to validate the signed dummy request via the # ec2tokens API. This proves the v3 ec2 credentials actually work. sig_ref = {'access': access, 'signature': signature, 'host': 'foo', 'verb': 'GET', 'path': '/bar', 'params': params} r = self.post( '/ec2tokens', body={'ec2Credentials': sig_ref}, expected_status=http_client.OK) self.assertValidTokenResponse(r) def test_ec2_credential_signature_validate(self): """Test signature validation with a v3 ec2 credential.""" blob, ref = unit.new_ec2_credential(user_id=self.user['id'], project_id=self.project_id) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) # Assert credential id is same as hash of access key id access = blob['access'].encode('utf-8') self.assertEqual(hashlib.sha256(access).hexdigest(), r.result['credential']['id']) cred_blob = json.loads(r.result['credential']['blob']) self.assertEqual(blob, cred_blob) self._validate_signature(access=cred_blob['access'], secret=cred_blob['secret']) def test_ec2_credential_signature_validate_legacy(self): """Test signature validation with a legacy v3 ec2 credential.""" cred_json, _ = self._create_dict_blob_credential() cred_blob = json.loads(cred_json) self._validate_signature(access=cred_blob['access'], secret=cred_blob['secret']) def _get_ec2_cred_uri(self): return '/users/%s/credentials/OS-EC2' 
% self.user_id def _get_ec2_cred(self): uri = self._get_ec2_cred_uri() r = self.post(uri, body={'tenant_id': self.project_id}) return r.result['credential'] def test_ec2_create_credential(self): """Test ec2 credential creation.""" ec2_cred = self._get_ec2_cred() self.assertEqual(self.user_id, ec2_cred['user_id']) self.assertEqual(self.project_id, ec2_cred['tenant_id']) self.assertIsNone(ec2_cred['trust_id']) self._validate_signature(access=ec2_cred['access'], secret=ec2_cred['secret']) uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']]) self.assertThat(ec2_cred['links']['self'], matchers.EndsWith(uri)) def test_ec2_get_credential(self): ec2_cred = self._get_ec2_cred() uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']]) r = self.get(uri) self.assertDictEqual(ec2_cred, r.result['credential']) self.assertThat(ec2_cred['links']['self'], matchers.EndsWith(uri)) def test_ec2_cannot_get_non_ec2_credential(self): access_key = uuid.uuid4().hex cred_id = utils.hash_access_key(access_key) non_ec2_cred = unit.new_credential_ref( user_id=self.user_id, project_id=self.project_id) non_ec2_cred['id'] = cred_id self.credential_api.create_credential(cred_id, non_ec2_cred) uri = '/'.join([self._get_ec2_cred_uri(), access_key]) # if access_key is not found, ec2 controller raises Unauthorized # exception self.get(uri, expected_status=http_client.UNAUTHORIZED) def test_ec2_list_credentials(self): """Test ec2 credential listing.""" self._get_ec2_cred() uri = self._get_ec2_cred_uri() r = self.get(uri) cred_list = r.result['credentials'] self.assertEqual(1, len(cred_list)) self.assertThat(r.result['links']['self'], matchers.EndsWith(uri)) # non-EC2 credentials won't be fetched non_ec2_cred = unit.new_credential_ref( user_id=self.user_id, project_id=self.project_id) non_ec2_cred['type'] = uuid.uuid4().hex self.credential_api.create_credential(non_ec2_cred['id'], non_ec2_cred) r = self.get(uri) cred_list_2 = r.result['credentials'] # still one element because non-EC2 
credentials are not returned. self.assertEqual(1, len(cred_list_2)) self.assertEqual(cred_list[0], cred_list_2[0]) def test_ec2_delete_credential(self): """Test ec2 credential deletion.""" ec2_cred = self._get_ec2_cred() uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']]) cred_from_credential_api = ( self.credential_api .list_credentials_for_user(self.user_id, type=CRED_TYPE_EC2)) self.assertEqual(1, len(cred_from_credential_api)) self.delete(uri) self.assertRaises(exception.CredentialNotFound, self.credential_api.get_credential, cred_from_credential_api[0]['id']) keystone-9.0.0/keystone/tests/unit/test_backend_sql.py0000664000567000056710000012632012701407102024361 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import uuid import mock from oslo_config import cfg from oslo_db import exception as db_exception from oslo_db import options from six.moves import range import sqlalchemy from sqlalchemy import exc from testtools import matchers from keystone.common import driver_hints from keystone.common import sql from keystone import exception from keystone.identity.backends import sql as identity_sql from keystone import resource from keystone.tests import unit from keystone.tests.unit.assignment import test_backends as assignment_tests from keystone.tests.unit.catalog import test_backends as catalog_tests from keystone.tests.unit import default_fixtures from keystone.tests.unit.identity import test_backends as identity_tests from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.policy import test_backends as policy_tests from keystone.tests.unit.resource import test_backends as resource_tests from keystone.tests.unit.token import test_backends as token_tests from keystone.tests.unit.trust import test_backends as trust_tests from keystone.token.persistence.backends import sql as token_sql CONF = cfg.CONF class SqlTests(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): super(SqlTests, self).setUp() self.useFixture(database.Database(self.sql_driver_version_overrides)) self.load_backends() # populate the engine with tables & fixtures self.load_fixtures(default_fixtures) # defaulted by the data load self.user_foo['enabled'] = True def config_files(self): config_files = super(SqlTests, self).config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files class SqlModels(SqlTests): def select_table(self, name): table = sqlalchemy.Table(name, sql.ModelBase.metadata, autoload=True) s = sqlalchemy.select([table]) return s def assertExpectedSchema(self, table, expected_schema): """Assert that a table's schema is what we expect. 
:param string table: the name of the table to inspect :param tuple expected_schema: a tuple of tuples containing the expected schema :raises AssertionError: when the database schema doesn't match the expected schema The expected_schema format is simply:: ( ('column name', sql type, qualifying detail), ... ) The qualifying detail varies based on the type of the column:: - sql.Boolean columns must indicate the column's default value or None if there is no default - Columns with a length, like sql.String, must indicate the column's length - All other column types should use None Example:: cols = (('id', sql.String, 64), ('enabled', sql.Boolean, True), ('extra', sql.JsonBlob, None)) self.assertExpectedSchema('table_name', cols) """ table = self.select_table(table) actual_schema = [] for column in table.c: if isinstance(column.type, sql.Boolean): default = None if column._proxies[0].default: default = column._proxies[0].default.arg actual_schema.append((column.name, type(column.type), default)) elif (hasattr(column.type, 'length') and not isinstance(column.type, sql.Enum)): # NOTE(dstanek): Even though sql.Enum columns have a length # set we don't want to catch them here. Maybe in the future # we'll check to see that they contain a list of the correct # possible values. 
actual_schema.append((column.name, type(column.type), column.type.length)) else: actual_schema.append((column.name, type(column.type), None)) self.assertItemsEqual(expected_schema, actual_schema) def test_user_model(self): cols = (('id', sql.String, 64), ('default_project_id', sql.String, 64), ('enabled', sql.Boolean, None), ('extra', sql.JsonBlob, None)) self.assertExpectedSchema('user', cols) def test_local_user_model(self): cols = (('id', sql.Integer, None), ('user_id', sql.String, 64), ('name', sql.String, 255), ('domain_id', sql.String, 64)) self.assertExpectedSchema('local_user', cols) def test_password_model(self): cols = (('id', sql.Integer, None), ('local_user_id', sql.Integer, None), ('password', sql.String, 128)) self.assertExpectedSchema('password', cols) def test_federated_user_model(self): cols = (('id', sql.Integer, None), ('user_id', sql.String, 64), ('idp_id', sql.String, 64), ('protocol_id', sql.String, 64), ('unique_id', sql.String, 255), ('display_name', sql.String, 255)) self.assertExpectedSchema('federated_user', cols) def test_group_model(self): cols = (('id', sql.String, 64), ('name', sql.String, 64), ('description', sql.Text, None), ('domain_id', sql.String, 64), ('extra', sql.JsonBlob, None)) self.assertExpectedSchema('group', cols) def test_domain_model(self): cols = (('id', sql.String, 64), ('name', sql.String, 64), ('enabled', sql.Boolean, True), ('extra', sql.JsonBlob, None)) self.assertExpectedSchema('domain', cols) def test_project_model(self): cols = (('id', sql.String, 64), ('name', sql.String, 64), ('description', sql.Text, None), ('domain_id', sql.String, 64), ('enabled', sql.Boolean, None), ('extra', sql.JsonBlob, None), ('parent_id', sql.String, 64), ('is_domain', sql.Boolean, False)) self.assertExpectedSchema('project', cols) def test_role_assignment_model(self): cols = (('type', sql.Enum, None), ('actor_id', sql.String, 64), ('target_id', sql.String, 64), ('role_id', sql.String, 64), ('inherited', sql.Boolean, False)) 
self.assertExpectedSchema('assignment', cols) def test_user_group_membership(self): cols = (('group_id', sql.String, 64), ('user_id', sql.String, 64)) self.assertExpectedSchema('user_group_membership', cols) def test_revocation_event_model(self): cols = (('id', sql.Integer, None), ('domain_id', sql.String, 64), ('project_id', sql.String, 64), ('user_id', sql.String, 64), ('role_id', sql.String, 64), ('trust_id', sql.String, 64), ('consumer_id', sql.String, 64), ('access_token_id', sql.String, 64), ('issued_before', sql.DateTime, None), ('expires_at', sql.DateTime, None), ('revoked_at', sql.DateTime, None), ('audit_id', sql.String, 32), ('audit_chain_id', sql.String, 32)) self.assertExpectedSchema('revocation_event', cols) class SqlIdentity(SqlTests, identity_tests.IdentityTests, assignment_tests.AssignmentTests, resource_tests.ResourceTests): def test_password_hashed(self): with sql.session_for_read() as session: user_ref = self.identity_api._get_user(session, self.user_foo['id']) self.assertNotEqual(self.user_foo['password'], user_ref['password']) def test_create_user_with_null_password(self): user_dict = unit.new_user_ref( domain_id=CONF.identity.default_domain_id) user_dict["password"] = None new_user_dict = self.identity_api.create_user(user_dict) with sql.session_for_read() as session: new_user_ref = self.identity_api._get_user(session, new_user_dict['id']) self.assertFalse(new_user_ref.local_user.passwords) def test_update_user_with_null_password(self): user_dict = unit.new_user_ref( domain_id=CONF.identity.default_domain_id) self.assertTrue(user_dict['password']) new_user_dict = self.identity_api.create_user(user_dict) new_user_dict["password"] = None new_user_dict = self.identity_api.update_user(new_user_dict['id'], new_user_dict) with sql.session_for_read() as session: new_user_ref = self.identity_api._get_user(session, new_user_dict['id']) self.assertFalse(new_user_ref.local_user.passwords) def test_delete_user_with_project_association(self): user = 
unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) self.assignment_api.add_user_to_project(self.tenant_bar['id'], user['id']) self.identity_api.delete_user(user['id']) self.assertRaises(exception.UserNotFound, self.assignment_api.list_projects_for_user, user['id']) def test_create_null_user_name(self): user = unit.new_user_ref(name=None, domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ValidationError, self.identity_api.create_user, user) self.assertRaises(exception.UserNotFound, self.identity_api.get_user_by_name, user['name'], CONF.identity.default_domain_id) def test_create_user_case_sensitivity(self): # user name case sensitivity is down to the fact that it is marked as # an SQL UNIQUE column, which may not be valid for other backends, like # LDAP. # create a ref with a lowercase name ref = unit.new_user_ref(name=uuid.uuid4().hex.lower(), domain_id=CONF.identity.default_domain_id) ref = self.identity_api.create_user(ref) # assign a new ID with the same name, but this time in uppercase ref['name'] = ref['name'].upper() self.identity_api.create_user(ref) def test_create_federated_user_unique_constraint(self): federated_dict = unit.new_federated_user_ref() user_dict = self.shadow_users_api.create_federated_user(federated_dict) user_dict = self.identity_api.get_user(user_dict["id"]) self.assertIsNotNone(user_dict["id"]) self.assertRaises(exception.Conflict, self.shadow_users_api.create_federated_user, federated_dict) def test_get_federated_user(self): federated_dict = unit.new_federated_user_ref() user_dict_create = self.shadow_users_api.create_federated_user( federated_dict) user_dict_get = self.shadow_users_api.get_federated_user( federated_dict["idp_id"], federated_dict["protocol_id"], federated_dict["unique_id"]) self.assertItemsEqual(user_dict_create, user_dict_get) self.assertEqual(user_dict_create["id"], user_dict_get["id"]) def test_update_federated_user_display_name(self): 
federated_dict = unit.new_federated_user_ref() user_dict_create = self.shadow_users_api.create_federated_user( federated_dict) new_display_name = uuid.uuid4().hex self.shadow_users_api.update_federated_user_display_name( federated_dict["idp_id"], federated_dict["protocol_id"], federated_dict["unique_id"], new_display_name) user_ref = self.shadow_users_api._get_federated_user( federated_dict["idp_id"], federated_dict["protocol_id"], federated_dict["unique_id"]) self.assertEqual(user_ref.federated_users[0].display_name, new_display_name) self.assertEqual(user_dict_create["id"], user_ref.id) def test_create_project_case_sensitivity(self): # project name case sensitivity is down to the fact that it is marked # as an SQL UNIQUE column, which may not be valid for other backends, # like LDAP. # create a ref with a lowercase name ref = unit.new_project_ref(domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(ref['id'], ref) # assign a new ID with the same name, but this time in uppercase ref['id'] = uuid.uuid4().hex ref['name'] = ref['name'].upper() self.resource_api.create_project(ref['id'], ref) def test_create_null_project_name(self): project = unit.new_project_ref( name=None, domain_id=CONF.identity.default_domain_id) self.assertRaises(exception.ValidationError, self.resource_api.create_project, project['id'], project) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project_by_name, project['name'], CONF.identity.default_domain_id) def test_delete_project_with_user_association(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) self.assignment_api.add_user_to_project(self.tenant_bar['id'], user['id']) self.resource_api.delete_project(self.tenant_bar['id']) tenants = self.assignment_api.list_projects_for_user(user['id']) self.assertEqual([], tenants) def 
test_update_project_returns_extra(self): """This tests for backwards-compatibility with an essex/folsom bug. Non-indexed attributes were returned in an 'extra' attribute, instead of on the entity itself; for consistency and backwards compatibility, those attributes should be included twice. This behavior is specific to the SQL driver. """ arbitrary_key = uuid.uuid4().hex arbitrary_value = uuid.uuid4().hex project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) project[arbitrary_key] = arbitrary_value ref = self.resource_api.create_project(project['id'], project) self.assertEqual(arbitrary_value, ref[arbitrary_key]) self.assertIsNone(ref.get('extra')) ref['name'] = uuid.uuid4().hex ref = self.resource_api.update_project(ref['id'], ref) self.assertEqual(arbitrary_value, ref[arbitrary_key]) self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key]) def test_update_user_returns_extra(self): """This tests for backwards-compatibility with an essex/folsom bug. Non-indexed attributes were returned in an 'extra' attribute, instead of on the entity itself; for consistency and backwards compatibility, those attributes should be included twice. This behavior is specific to the SQL driver. 
""" arbitrary_key = uuid.uuid4().hex arbitrary_value = uuid.uuid4().hex user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user[arbitrary_key] = arbitrary_value del user["id"] ref = self.identity_api.create_user(user) self.assertEqual(arbitrary_value, ref[arbitrary_key]) self.assertIsNone(ref.get('password')) self.assertIsNone(ref.get('extra')) user['name'] = uuid.uuid4().hex user['password'] = uuid.uuid4().hex ref = self.identity_api.update_user(ref['id'], user) self.assertIsNone(ref.get('password')) self.assertIsNone(ref['extra'].get('password')) self.assertEqual(arbitrary_value, ref[arbitrary_key]) self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key]) def test_sql_user_to_dict_null_default_project_id(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = self.identity_api.create_user(user) with sql.session_for_read() as session: query = session.query(identity_sql.User) query = query.filter_by(id=user['id']) raw_user_ref = query.one() self.assertIsNone(raw_user_ref.default_project_id) user_ref = raw_user_ref.to_dict() self.assertNotIn('default_project_id', user_ref) session.close() def test_list_domains_for_user(self): domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user = unit.new_user_ref(domain_id=domain['id']) test_domain1 = unit.new_domain_ref() self.resource_api.create_domain(test_domain1['id'], test_domain1) test_domain2 = unit.new_domain_ref() self.resource_api.create_domain(test_domain2['id'], test_domain2) user = self.identity_api.create_user(user) user_domains = self.assignment_api.list_domains_for_user(user['id']) self.assertEqual(0, len(user_domains)) self.assignment_api.create_grant(user_id=user['id'], domain_id=test_domain1['id'], role_id=self.role_member['id']) self.assignment_api.create_grant(user_id=user['id'], domain_id=test_domain2['id'], role_id=self.role_member['id']) user_domains = self.assignment_api.list_domains_for_user(user['id']) 
self.assertThat(user_domains, matchers.HasLength(2)) def test_list_domains_for_user_with_grants(self): # Create two groups each with a role on a different domain, and # make user1 a member of both groups. Both these new domains # should now be included, along with any direct user grants. domain = unit.new_domain_ref() self.resource_api.create_domain(domain['id'], domain) user = unit.new_user_ref(domain_id=domain['id']) user = self.identity_api.create_user(user) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = self.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domain['id']) group2 = self.identity_api.create_group(group2) test_domain1 = unit.new_domain_ref() self.resource_api.create_domain(test_domain1['id'], test_domain1) test_domain2 = unit.new_domain_ref() self.resource_api.create_domain(test_domain2['id'], test_domain2) test_domain3 = unit.new_domain_ref() self.resource_api.create_domain(test_domain3['id'], test_domain3) self.identity_api.add_user_to_group(user['id'], group1['id']) self.identity_api.add_user_to_group(user['id'], group2['id']) # Create 3 grants, one user grant, the other two as group grants self.assignment_api.create_grant(user_id=user['id'], domain_id=test_domain1['id'], role_id=self.role_member['id']) self.assignment_api.create_grant(group_id=group1['id'], domain_id=test_domain2['id'], role_id=self.role_admin['id']) self.assignment_api.create_grant(group_id=group2['id'], domain_id=test_domain3['id'], role_id=self.role_admin['id']) user_domains = self.assignment_api.list_domains_for_user(user['id']) self.assertThat(user_domains, matchers.HasLength(3)) def test_list_domains_for_user_with_inherited_grants(self): """Test that inherited roles on the domain are excluded. 
Test Plan: - Create two domains, one user, group and role - Domain1 is given an inherited user role, Domain2 an inherited group role (for a group of which the user is a member) - When listing domains for user, neither domain should be returned """ domain1 = unit.new_domain_ref() domain1 = self.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() domain2 = self.resource_api.create_domain(domain2['id'], domain2) user = unit.new_user_ref(domain_id=domain1['id']) user = self.identity_api.create_user(user) group = unit.new_group_ref(domain_id=domain1['id']) group = self.identity_api.create_group(group) self.identity_api.add_user_to_group(user['id'], group['id']) role = unit.new_role_ref() self.role_api.create_role(role['id'], role) # Create a grant on each domain, one user grant, one group grant, # both inherited. self.assignment_api.create_grant(user_id=user['id'], domain_id=domain1['id'], role_id=role['id'], inherited_to_projects=True) self.assignment_api.create_grant(group_id=group['id'], domain_id=domain2['id'], role_id=role['id'], inherited_to_projects=True) user_domains = self.assignment_api.list_domains_for_user(user['id']) # No domains should be returned since both domains have only inherited # roles assignments. self.assertThat(user_domains, matchers.HasLength(0)) def test_storing_null_domain_id_in_project_ref(self): """Test the special storage of domain_id=None in sql resource driver. The resource driver uses a special value in place of None for domain_id in the project record. This shouldn't escape the driver. Hence we test the interface to ensure that you can store a domain_id of None, and that any special value used inside the driver does not escape through the interface. """ spoiler_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id) self.resource_api.create_project(spoiler_project['id'], spoiler_project) # First let's create a project with a None domain_id and make sure we # can read it back. 
project = unit.new_project_ref(domain_id=None, is_domain=True) project = self.resource_api.create_project(project['id'], project) ref = self.resource_api.get_project(project['id']) self.assertDictEqual(project, ref) # Can we get it by name? ref = self.resource_api.get_project_by_name(project['name'], None) self.assertDictEqual(project, ref) # Can we filter for them - create a second domain to ensure we are # testing the receipt of more than one. project2 = unit.new_project_ref(domain_id=None, is_domain=True) project2 = self.resource_api.create_project(project2['id'], project2) hints = driver_hints.Hints() hints.add_filter('domain_id', None) refs = self.resource_api.list_projects(hints) self.assertThat(refs, matchers.HasLength(2 + self.domain_count)) self.assertIn(project, refs) self.assertIn(project2, refs) # Can we update it? project['name'] = uuid.uuid4().hex self.resource_api.update_project(project['id'], project) ref = self.resource_api.get_project(project['id']) self.assertDictEqual(project, ref) # Finally, make sure we can delete it project['enabled'] = False self.resource_api.update_project(project['id'], project) self.resource_api.delete_project(project['id']) self.assertRaises(exception.ProjectNotFound, self.resource_api.get_project, project['id']) def test_hidden_project_domain_root_is_really_hidden(self): """Ensure we cannot access the hidden root of all project domains. Calling any of the driver methods should result in the same as would be returned if we passed a project that does not exist. We don't test create_project, since we do not allow a caller of our API to specify their own ID for a new entity. 
""" def _exercise_project_api(ref_id): driver = self.resource_api.driver self.assertRaises(exception.ProjectNotFound, driver.get_project, ref_id) self.assertRaises(exception.ProjectNotFound, driver.get_project_by_name, resource.NULL_DOMAIN_ID, ref_id) project_ids = [x['id'] for x in driver.list_projects(driver_hints.Hints())] self.assertNotIn(ref_id, project_ids) projects = driver.list_projects_from_ids([ref_id]) self.assertThat(projects, matchers.HasLength(0)) project_ids = [x for x in driver.list_project_ids_from_domain_ids([ref_id])] self.assertNotIn(ref_id, project_ids) self.assertRaises(exception.DomainNotFound, driver.list_projects_in_domain, ref_id) project_ids = [ x['id'] for x in driver.list_projects_acting_as_domain(driver_hints.Hints())] self.assertNotIn(ref_id, project_ids) projects = driver.list_projects_in_subtree(ref_id) self.assertThat(projects, matchers.HasLength(0)) self.assertRaises(exception.ProjectNotFound, driver.list_project_parents, ref_id) # A non-existing project just returns True from the driver self.assertTrue(driver.is_leaf_project(ref_id)) self.assertRaises(exception.ProjectNotFound, driver.update_project, ref_id, {}) self.assertRaises(exception.ProjectNotFound, driver.delete_project, ref_id) # Deleting list of projects that includes a non-existing project # should be silent driver.delete_projects_from_ids([ref_id]) _exercise_project_api(uuid.uuid4().hex) _exercise_project_api(resource.NULL_DOMAIN_ID) class SqlTrust(SqlTests, trust_tests.TrustTests): pass class SqlToken(SqlTests, token_tests.TokenTests): def test_token_revocation_list_uses_right_columns(self): # This query used to be heavy with too many columns. We want # to make sure it is only running with the minimum columns # necessary. 
expected_query_args = (token_sql.TokenModel.id, token_sql.TokenModel.expires, token_sql.TokenModel.extra,) with mock.patch.object(token_sql, 'sql') as mock_sql: tok = token_sql.Token() tok.list_revoked_tokens() mock_query = mock_sql.session_for_read().__enter__().query mock_query.assert_called_with(*expected_query_args) def test_flush_expired_tokens_batch(self): # TODO(dstanek): This test should be rewritten to be less # brittle. The code will likely need to be changed first. I # just copied the spirit of the existing test when I rewrote # mox -> mock. These tests are brittle because they have the # call structure for SQLAlchemy encoded in them. # test sqlite dialect with mock.patch.object(token_sql, 'sql') as mock_sql: mock_sql.get_session().bind.dialect.name = 'sqlite' tok = token_sql.Token() tok.flush_expired_tokens() filter_mock = mock_sql.get_session().query().filter() self.assertFalse(filter_mock.limit.called) self.assertTrue(filter_mock.delete.called_once) def test_flush_expired_tokens_batch_mysql(self): # test mysql dialect, we don't need to test IBM DB SA separately, since # other tests below test the differences between how they use the batch # strategy with mock.patch.object(token_sql, 'sql') as mock_sql: mock_sql.session_for_write().__enter__( ).query().filter().delete.return_value = 0 mock_sql.session_for_write().__enter__( ).bind.dialect.name = 'mysql' tok = token_sql.Token() expiry_mock = mock.Mock() ITERS = [1, 2, 3] expiry_mock.return_value = iter(ITERS) token_sql._expiry_range_batched = expiry_mock tok.flush_expired_tokens() # The expiry strategy is only invoked once, the other calls are via # the yield return. 
self.assertEqual(1, expiry_mock.call_count) mock_delete = mock_sql.session_for_write().__enter__( ).query().filter().delete self.assertThat(mock_delete.call_args_list, matchers.HasLength(len(ITERS))) def test_expiry_range_batched(self): upper_bound_mock = mock.Mock(side_effect=[1, "final value"]) sess_mock = mock.Mock() query_mock = sess_mock.query().filter().order_by().offset().limit() query_mock.one.side_effect = [['test'], sql.NotFound()] for i, x in enumerate(token_sql._expiry_range_batched(sess_mock, upper_bound_mock, batch_size=50)): if i == 0: # The first time the batch iterator returns, it should return # the first result that comes back from the database. self.assertEqual('test', x) elif i == 1: # The second time, the database range function should return # nothing, so the batch iterator returns the result of the # upper_bound function self.assertEqual("final value", x) else: self.fail("range batch function returned more than twice") def test_expiry_range_strategy_sqlite(self): tok = token_sql.Token() sqlite_strategy = tok._expiry_range_strategy('sqlite') self.assertEqual(token_sql._expiry_range_all, sqlite_strategy) def test_expiry_range_strategy_ibm_db_sa(self): tok = token_sql.Token() db2_strategy = tok._expiry_range_strategy('ibm_db_sa') self.assertIsInstance(db2_strategy, functools.partial) self.assertEqual(token_sql._expiry_range_batched, db2_strategy.func) self.assertEqual({'batch_size': 100}, db2_strategy.keywords) def test_expiry_range_strategy_mysql(self): tok = token_sql.Token() mysql_strategy = tok._expiry_range_strategy('mysql') self.assertIsInstance(mysql_strategy, functools.partial) self.assertEqual(token_sql._expiry_range_batched, mysql_strategy.func) self.assertEqual({'batch_size': 1000}, mysql_strategy.keywords) class SqlCatalog(SqlTests, catalog_tests.CatalogTests): _legacy_endpoint_id_in_endpoint = True _enabled_default_to_true_when_creating_endpoint = True def test_catalog_ignored_malformed_urls(self): service = unit.new_service_ref() 
self.catalog_api.create_service(service['id'], service) malformed_url = "http://192.168.1.104:8774/v2/$(tenant)s" endpoint = unit.new_endpoint_ref(service_id=service['id'], url=malformed_url, region_id=None) self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy()) # NOTE(dstanek): there are no valid URLs, so nothing is in the catalog catalog = self.catalog_api.get_catalog('fake-user', 'fake-tenant') self.assertEqual({}, catalog) def test_get_catalog_with_empty_public_url(self): service = unit.new_service_ref() self.catalog_api.create_service(service['id'], service) endpoint = unit.new_endpoint_ref(url='', service_id=service['id'], region_id=None) self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy()) catalog = self.catalog_api.get_catalog('user', 'tenant') catalog_endpoint = catalog[endpoint['region_id']][service['type']] self.assertEqual(service['name'], catalog_endpoint['name']) self.assertEqual(endpoint['id'], catalog_endpoint['id']) self.assertEqual('', catalog_endpoint['publicURL']) self.assertIsNone(catalog_endpoint.get('adminURL')) self.assertIsNone(catalog_endpoint.get('internalURL')) def test_create_endpoint_region_returns_not_found(self): service = unit.new_service_ref() self.catalog_api.create_service(service['id'], service) endpoint = unit.new_endpoint_ref(region_id=uuid.uuid4().hex, service_id=service['id']) self.assertRaises(exception.ValidationError, self.catalog_api.create_endpoint, endpoint['id'], endpoint.copy()) def test_create_region_invalid_id(self): region = unit.new_region_ref(id='0' * 256) self.assertRaises(exception.StringLengthExceeded, self.catalog_api.create_region, region) def test_create_region_invalid_parent_id(self): region = unit.new_region_ref(parent_region_id='0' * 256) self.assertRaises(exception.RegionNotFound, self.catalog_api.create_region, region) def test_delete_region_with_endpoint(self): # create a region region = unit.new_region_ref() self.catalog_api.create_region(region) # create a child region 
child_region = unit.new_region_ref(parent_region_id=region['id']) self.catalog_api.create_region(child_region) # create a service service = unit.new_service_ref() self.catalog_api.create_service(service['id'], service) # create an endpoint attached to the service and child region child_endpoint = unit.new_endpoint_ref(region_id=child_region['id'], service_id=service['id']) self.catalog_api.create_endpoint(child_endpoint['id'], child_endpoint) self.assertRaises(exception.RegionDeletionError, self.catalog_api.delete_region, child_region['id']) # create an endpoint attached to the service and parent region endpoint = unit.new_endpoint_ref(region_id=region['id'], service_id=service['id']) self.catalog_api.create_endpoint(endpoint['id'], endpoint) self.assertRaises(exception.RegionDeletionError, self.catalog_api.delete_region, region['id']) class SqlPolicy(SqlTests, policy_tests.PolicyTests): pass class SqlInheritance(SqlTests, assignment_tests.InheritanceTests): pass class SqlImpliedRoles(SqlTests, assignment_tests.ImpliedRoleTests): pass class SqlTokenCacheInvalidation(SqlTests, token_tests.TokenCacheInvalidation): def setUp(self): super(SqlTokenCacheInvalidation, self).setUp() self._create_test_data() class SqlFilterTests(SqlTests, identity_tests.FilterTests): def clean_up_entities(self): """Clean up entity test data from Filter Test Cases.""" for entity in ['user', 'group', 'project']: self._delete_test_data(entity, self.entity_list[entity]) self._delete_test_data(entity, self.domain1_entity_list[entity]) del self.entity_list del self.domain1_entity_list self.domain1['enabled'] = False self.resource_api.update_domain(self.domain1['id'], self.domain1) self.resource_api.delete_domain(self.domain1['id']) del self.domain1 def test_list_entities_filtered_by_domain(self): # NOTE(henry-nash): This method is here rather than in # unit.identity.test_backends since any domain filtering with LDAP is # handled by the manager layer (and is already tested elsewhere) not at # the 
driver level. self.addCleanup(self.clean_up_entities) self.domain1 = unit.new_domain_ref() self.resource_api.create_domain(self.domain1['id'], self.domain1) self.entity_list = {} self.domain1_entity_list = {} for entity in ['user', 'group', 'project']: # Create 5 entities, 3 of which are in domain1 DOMAIN1_ENTITIES = 3 self.entity_list[entity] = self._create_test_data(entity, 2) self.domain1_entity_list[entity] = self._create_test_data( entity, DOMAIN1_ENTITIES, self.domain1['id']) # Should get back the DOMAIN1_ENTITIES in domain1 hints = driver_hints.Hints() hints.add_filter('domain_id', self.domain1['id']) entities = self._list_entities(entity)(hints=hints) self.assertEqual(DOMAIN1_ENTITIES, len(entities)) self._match_with_list(entities, self.domain1_entity_list[entity]) # Check the driver has removed the filter from the list hints self.assertFalse(hints.get_exact_filter_by_name('domain_id')) def test_filter_sql_injection_attack(self): """Test against sql injection attack on filters Test Plan: - Attempt to get all entities back by passing a two-term attribute - Attempt to piggyback filter to damage DB (e.g. drop table) """ # Check we have some users users = self.identity_api.list_users() self.assertTrue(len(users) > 0) hints = driver_hints.Hints() hints.add_filter('name', "anything' or 'x'='x") users = self.identity_api.list_users(hints=hints) self.assertEqual(0, len(users)) # See if we can add a SQL command...use the group table instead of the # user table since 'user' is reserved word for SQLAlchemy. 
group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = self.identity_api.create_group(group) hints = driver_hints.Hints() hints.add_filter('name', "x'; drop table group") groups = self.identity_api.list_groups(hints=hints) self.assertEqual(0, len(groups)) groups = self.identity_api.list_groups() self.assertTrue(len(groups) > 0) class SqlLimitTests(SqlTests, identity_tests.LimitTests): def setUp(self): super(SqlLimitTests, self).setUp() identity_tests.LimitTests.setUp(self) class FakeTable(sql.ModelBase): __tablename__ = 'test_table' col = sql.Column(sql.String(32), primary_key=True) @sql.handle_conflicts('keystone') def insert(self): raise db_exception.DBDuplicateEntry @sql.handle_conflicts('keystone') def update(self): raise db_exception.DBError( inner_exception=exc.IntegrityError('a', 'a', 'a')) @sql.handle_conflicts('keystone') def lookup(self): raise KeyError class SqlDecorators(unit.TestCase): def test_initialization_fail(self): self.assertRaises(exception.StringLengthExceeded, FakeTable, col='a' * 64) def test_initialization(self): tt = FakeTable(col='a') self.assertEqual('a', tt.col) def test_conflict_happend(self): self.assertRaises(exception.Conflict, FakeTable().insert) self.assertRaises(exception.UnexpectedError, FakeTable().update) def test_not_conflict_error(self): self.assertRaises(KeyError, FakeTable().lookup) class SqlModuleInitialization(unit.TestCase): @mock.patch.object(sql.core, 'CONF') @mock.patch.object(options, 'set_defaults') def test_initialize_module(self, set_defaults, CONF): sql.initialize() set_defaults.assert_called_with(CONF, connection='sqlite:///keystone.db') class SqlCredential(SqlTests): def _create_credential_with_user_id(self, user_id=uuid.uuid4().hex): credential = unit.new_credential_ref(user_id=user_id, extra=uuid.uuid4().hex, type=uuid.uuid4().hex) self.credential_api.create_credential(credential['id'], credential) return credential def _validateCredentialList(self, retrieved_credentials, 
expected_credentials): self.assertEqual(len(expected_credentials), len(retrieved_credentials)) retrived_ids = [c['id'] for c in retrieved_credentials] for cred in expected_credentials: self.assertIn(cred['id'], retrived_ids) def setUp(self): super(SqlCredential, self).setUp() self.credentials = [] for _ in range(3): self.credentials.append( self._create_credential_with_user_id()) self.user_credentials = [] for _ in range(3): cred = self._create_credential_with_user_id(self.user_foo['id']) self.user_credentials.append(cred) self.credentials.append(cred) def test_list_credentials(self): credentials = self.credential_api.list_credentials() self._validateCredentialList(credentials, self.credentials) # test filtering using hints hints = driver_hints.Hints() hints.add_filter('user_id', self.user_foo['id']) credentials = self.credential_api.list_credentials(hints) self._validateCredentialList(credentials, self.user_credentials) def test_list_credentials_for_user(self): credentials = self.credential_api.list_credentials_for_user( self.user_foo['id']) self._validateCredentialList(credentials, self.user_credentials) def test_list_credentials_for_user_and_type(self): cred = self.user_credentials[0] credentials = self.credential_api.list_credentials_for_user( self.user_foo['id'], type=cred['type']) self._validateCredentialList(credentials, [cred]) keystone-9.0.0/keystone/tests/functional/0000775000567000056710000000000012701407246021672 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/functional/__init__.py0000664000567000056710000000000012701407102023760 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/functional/shared/0000775000567000056710000000000012701407246023140 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/functional/shared/__init__.py0000664000567000056710000000000012701407102025226 0ustar 
jenkinsjenkins00000000000000keystone-9.0.0/keystone/tests/functional/shared/test_running.py0000664000567000056710000000402412701407102026220 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import requests import testtools.matchers from keystone.tests.functional import core as functests is_multiple_choices = testtools.matchers.Equals( requests.status_codes.codes.multiple_choices) is_ok = testtools.matchers.Equals(requests.status_codes.codes.ok) versions = ('v2.0', 'v3') class TestServerRunning(functests.BaseTestCase): def test_admin_responds_with_multiple_choices(self): resp = requests.get(self.ADMIN_URL) self.assertThat(resp.status_code, is_multiple_choices) def test_admin_versions(self): for version in versions: resp = requests.get(self.ADMIN_URL + '/' + version) self.assertThat( resp.status_code, testtools.matchers.Annotate( 'failed for version %s' % version, is_ok)) def test_public_responds_with_multiple_choices(self): resp = requests.get(self.PUBLIC_URL) self.assertThat(resp.status_code, is_multiple_choices) def test_public_versions(self): for version in versions: resp = requests.get(self.PUBLIC_URL + '/' + version) self.assertThat( resp.status_code, testtools.matchers.Annotate( 'failed for version %s' % version, is_ok)) def test_get_user_token(self): token = self.get_scoped_user_token() self.assertIsNotNone(token) def test_get_admin_token(self): token = self.get_scoped_admin_token() self.assertIsNotNone(token) 
keystone-9.0.0/keystone/tests/functional/core.py0000664000567000056710000000603712701407102023171 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import requests import testtools from keystone.tests.common import auth as common_auth class BaseTestCase(testtools.TestCase, common_auth.AuthTestMixin): request_headers = {'content-type': 'application/json'} def setUp(self): self.ADMIN_URL = os.environ.get('KSTEST_ADMIN_URL', 'http://localhost:35357') self.PUBLIC_URL = os.environ.get('KSTEST_PUBLIC_URL', 'http://localhost:5000') self.admin = { 'name': os.environ.get('KSTEST_ADMIN_USERNAME', 'admin'), 'password': os.environ.get('KSTEST_ADMIN_PASSWORD', ''), 'domain_id': os.environ.get('KSTEST_ADMIN_DOMAIN_ID', 'default') } self.user = { 'name': os.environ.get('KSTEST_USER_USERNAME', 'demo'), 'password': os.environ.get('KSTEST_USER_PASSWORD', ''), 'domain_id': os.environ.get('KSTEST_USER_DOMAIN_ID', 'default') } self.project_id = os.environ.get('KSTEST_PROJECT_ID') super(BaseTestCase, self).setUp() def _http_headers(self, token=None): headers = {'content-type': 'application/json'} if token: headers['X-Auth-Token'] = token return headers def get_scoped_token_response(self, user): """Convenience method so that we can test authenticated requests :param user: A dictionary with user information like 'username', 'password', 'domain_id' :returns: urllib3.Response object """ body = self.build_authentication_request( username=user['name'], 
user_domain_name=user['domain_id'], password=user['password'], project_id=self.project_id) return requests.post(self.PUBLIC_URL + '/v3/auth/tokens', headers=self.request_headers, json=body) def get_scoped_token(self, user): """Convenience method for getting scoped token This method doesn't do any token validaton. :param user: A dictionary with user information like 'username', 'password', 'domain_id' :returns: An OpenStack token for further use :rtype: str """ r = self.get_scoped_token_response(user) return r.headers.get('X-Subject-Token') def get_scoped_admin_token(self): return self.get_scoped_token(self.admin) def get_scoped_user_token(self): return self.get_scoped_token(self.user) keystone-9.0.0/keystone/__init__.py0000664000567000056710000000000012701407102020454 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/credential/0000775000567000056710000000000012701407246020500 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/credential/backends/0000775000567000056710000000000012701407246022252 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/credential/backends/__init__.py0000664000567000056710000000000012701407102024340 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/credential/backends/sql.py0000664000567000056710000000764512701407102023426 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import driver_hints from keystone.common import sql from keystone import credential from keystone import exception class CredentialModel(sql.ModelBase, sql.DictBase): __tablename__ = 'credential' attributes = ['id', 'user_id', 'project_id', 'blob', 'type'] id = sql.Column(sql.String(64), primary_key=True) user_id = sql.Column(sql.String(64), nullable=False) project_id = sql.Column(sql.String(64)) blob = sql.Column(sql.JsonBlob(), nullable=False) type = sql.Column(sql.String(255), nullable=False) extra = sql.Column(sql.JsonBlob()) class Credential(credential.CredentialDriverV8): # credential crud @sql.handle_conflicts(conflict_type='credential') def create_credential(self, credential_id, credential): with sql.session_for_write() as session: ref = CredentialModel.from_dict(credential) session.add(ref) return ref.to_dict() @driver_hints.truncated def list_credentials(self, hints): with sql.session_for_read() as session: credentials = session.query(CredentialModel) credentials = sql.filter_limit_query(CredentialModel, credentials, hints) return [s.to_dict() for s in credentials] def list_credentials_for_user(self, user_id, type=None): with sql.session_for_read() as session: query = session.query(CredentialModel) query = query.filter_by(user_id=user_id) if type: query = query.filter_by(type=type) refs = query.all() return [ref.to_dict() for ref in refs] def _get_credential(self, session, credential_id): ref = session.query(CredentialModel).get(credential_id) if ref is None: raise exception.CredentialNotFound(credential_id=credential_id) return ref def get_credential(self, credential_id): with sql.session_for_read() as session: return self._get_credential(session, credential_id).to_dict() @sql.handle_conflicts(conflict_type='credential') def update_credential(self, credential_id, credential): with sql.session_for_write() as session: ref = self._get_credential(session, credential_id) old_dict = ref.to_dict() for k in credential: old_dict[k] = 
credential[k] new_credential = CredentialModel.from_dict(old_dict) for attr in CredentialModel.attributes: if attr != 'id': setattr(ref, attr, getattr(new_credential, attr)) ref.extra = new_credential.extra return ref.to_dict() def delete_credential(self, credential_id): with sql.session_for_write() as session: ref = self._get_credential(session, credential_id) session.delete(ref) def delete_credentials_for_project(self, project_id): with sql.session_for_write() as session: query = session.query(CredentialModel) query = query.filter_by(project_id=project_id) query.delete() def delete_credentials_for_user(self, user_id): with sql.session_for_write() as session: query = session.query(CredentialModel) query = query.filter_by(user_id=user_id) query.delete() keystone-9.0.0/keystone/credential/schema.py0000664000567000056710000000315312701407102022303 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
_credential_properties = { 'blob': { 'type': 'string' }, 'project_id': { 'type': 'string' }, 'type': { 'type': 'string' }, 'user_id': { 'type': 'string' } } credential_create = { 'type': 'object', 'properties': _credential_properties, 'additionalProperties': True, 'oneOf': [ { 'title': 'ec2 credential requires project_id', 'required': ['blob', 'type', 'user_id', 'project_id'], 'properties': { 'type': { 'enum': ['ec2'] } } }, { 'title': 'non-ec2 credential does not require project_id', 'required': ['blob', 'type', 'user_id'], 'properties': { 'type': { 'not': { 'enum': ['ec2'] } } } } ] } credential_update = { 'type': 'object', 'properties': _credential_properties, 'minProperties': 1, 'additionalProperties': True } keystone-9.0.0/keystone/credential/__init__.py0000664000567000056710000000125512701407102022603 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.credential import controllers # noqa from keystone.credential.core import * # noqa keystone-9.0.0/keystone/credential/core.py0000664000567000056710000001121712701407102021773 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Credential service.""" import abc from oslo_config import cfg from oslo_log import log import six from keystone.common import dependency from keystone.common import driver_hints from keystone.common import manager from keystone import exception CONF = cfg.CONF LOG = log.getLogger(__name__) @dependency.provider('credential_api') class Manager(manager.Manager): """Default pivot point for the Credential backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.credential' def __init__(self): super(Manager, self).__init__(CONF.credential.driver) @manager.response_truncated def list_credentials(self, hints=None): return self.driver.list_credentials(hints or driver_hints.Hints()) @six.add_metaclass(abc.ABCMeta) class CredentialDriverV8(object): # credential crud @abc.abstractmethod def create_credential(self, credential_id, credential): """Creates a new credential. :raises keystone.exception.Conflict: If a duplicate credential exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_credentials(self, hints): """List all credentials. :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: a list of credential_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_credentials_for_user(self, user_id, type=None): """List credentials for a user. 
:param user_id: ID of a user to filter credentials by. :param type: type of credentials to filter on. :returns: a list of credential_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_credential(self, credential_id): """Get a credential by ID. :returns: credential_ref :raises keystone.exception.CredentialNotFound: If credential doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_credential(self, credential_id, credential): """Updates an existing credential. :raises keystone.exception.CredentialNotFound: If credential doesn't exist. :raises keystone.exception.Conflict: If a duplicate credential exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_credential(self, credential_id): """Deletes an existing credential. :raises keystone.exception.CredentialNotFound: If credential doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_credentials_for_project(self, project_id): """Deletes all credentials for a project.""" self._delete_credentials(lambda cr: cr['project_id'] == project_id) @abc.abstractmethod def delete_credentials_for_user(self, user_id): """Deletes all credentials for a user.""" self._delete_credentials(lambda cr: cr['user_id'] == user_id) def _delete_credentials(self, match_fn): """Do the actual credential deletion work (default implementation). :param match_fn: function that takes a credential dict as the parameter and returns true or false if the identifier matches the credential dict. 
""" for cr in self.list_credentials(): if match_fn(cr): try: self.credential_api.delete_credential(cr['id']) except exception.CredentialNotFound: LOG.debug('Deletion of credential is not required: %s', cr['id']) Driver = manager.create_legacy_driver(CredentialDriverV8) keystone-9.0.0/keystone/credential/controllers.py0000664000567000056710000001063112701407102023410 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib from oslo_serialization import jsonutils from keystone.common import controller from keystone.common import dependency from keystone.common import validation from keystone.credential import schema from keystone import exception from keystone.i18n import _ @dependency.requires('credential_api') class CredentialV3(controller.V3Controller): collection_name = 'credentials' member_name = 'credential' def __init__(self): super(CredentialV3, self).__init__() self.get_member_from_driver = self.credential_api.get_credential def _assign_unique_id(self, ref, trust_id=None): # Generates and assigns a unique identifier to # a credential reference. 
if ref.get('type', '').lower() == 'ec2': try: blob = jsonutils.loads(ref.get('blob')) except (ValueError, TypeError): raise exception.ValidationError( message=_('Invalid blob in credential')) if not blob or not isinstance(blob, dict): raise exception.ValidationError(attribute='blob', target='credential') if blob.get('access') is None: raise exception.ValidationError(attribute='access', target='blob') ret_ref = ref.copy() ret_ref['id'] = hashlib.sha256(blob['access']).hexdigest() # Update the blob with the trust_id, so credentials created # with a trust scoped token will result in trust scoped # tokens when authentication via ec2tokens happens if trust_id is not None: blob['trust_id'] = trust_id ret_ref['blob'] = jsonutils.dumps(blob) return ret_ref else: return super(CredentialV3, self)._assign_unique_id(ref) @controller.protected() @validation.validated(schema.credential_create, 'credential') def create_credential(self, context, credential): trust_id = self._get_trust_id_for_request(context) ref = self._assign_unique_id(self._normalize_dict(credential), trust_id) ref = self.credential_api.create_credential(ref['id'], ref) return CredentialV3.wrap_member(context, ref) @staticmethod def _blob_to_json(ref): # credentials stored via ec2tokens before the fix for #1259584 # need json serializing, as that's the documented API format blob = ref.get('blob') if isinstance(blob, dict): new_ref = ref.copy() new_ref['blob'] = jsonutils.dumps(blob) return new_ref else: return ref @controller.filterprotected('user_id', 'type') def list_credentials(self, context, filters): hints = CredentialV3.build_driver_hints(context, filters) refs = self.credential_api.list_credentials(hints) ret_refs = [self._blob_to_json(r) for r in refs] return CredentialV3.wrap_collection(context, ret_refs, hints=hints) @controller.protected() def get_credential(self, context, credential_id): ref = self.credential_api.get_credential(credential_id) ret_ref = self._blob_to_json(ref) return 
CredentialV3.wrap_member(context, ret_ref) @controller.protected() @validation.validated(schema.credential_update, 'credential') def update_credential(self, context, credential_id, credential): self._require_matching_id(credential_id, credential) ref = self.credential_api.update_credential(credential_id, credential) return CredentialV3.wrap_member(context, ref) @controller.protected() def delete_credential(self, context, credential_id): return self.credential_api.delete_credential(credential_id) keystone-9.0.0/keystone/credential/routers.py0000664000567000056710000000201212701407102022537 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""WSGI Routers for the Credentials service.""" from keystone.common import router from keystone.common import wsgi from keystone.credential import controllers class Routers(wsgi.RoutersBase): def append_v3_routers(self, mapper, routers): routers.append( router.Router(controllers.CredentialV3(), 'credentials', 'credential', resource_descriptions=self.v3_resources)) keystone-9.0.0/keystone/federation/0000775000567000056710000000000012701407246020506 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/federation/backends/0000775000567000056710000000000012701407246022260 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/federation/backends/__init__.py0000664000567000056710000000000012701407102024346 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/federation/backends/sql.py0000664000567000056710000003511012701407102023420 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from oslo_serialization import jsonutils import six from sqlalchemy import orm from keystone.common import sql from keystone import exception from keystone.federation import core from keystone.i18n import _ LOG = log.getLogger(__name__) class FederationProtocolModel(sql.ModelBase, sql.DictBase): __tablename__ = 'federation_protocol' attributes = ['id', 'idp_id', 'mapping_id'] mutable_attributes = frozenset(['mapping_id']) id = sql.Column(sql.String(64), primary_key=True) idp_id = sql.Column(sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE'), primary_key=True) mapping_id = sql.Column(sql.String(64), nullable=False) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class IdentityProviderModel(sql.ModelBase, sql.DictBase): __tablename__ = 'identity_provider' attributes = ['id', 'enabled', 'description', 'remote_ids'] mutable_attributes = frozenset(['description', 'enabled', 'remote_ids']) id = sql.Column(sql.String(64), primary_key=True) enabled = sql.Column(sql.Boolean, nullable=False) description = sql.Column(sql.Text(), nullable=True) remote_ids = orm.relationship('IdPRemoteIdsModel', order_by='IdPRemoteIdsModel.remote_id', cascade='all, delete-orphan') @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() remote_ids_list = new_dictionary.pop('remote_ids', None) if not remote_ids_list: remote_ids_list = [] identity_provider = cls(**new_dictionary) remote_ids = [] # NOTE(fmarco76): the remote_ids_list contains only remote ids # associated with the IdP because of the "relationship" established in # sqlalchemy and corresponding to the FK in the idp_remote_ids table for remote in remote_ids_list: remote_ids.append(IdPRemoteIdsModel(remote_id=remote)) identity_provider.remote_ids = 
remote_ids return identity_provider def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) d['remote_ids'] = [] for remote in self.remote_ids: d['remote_ids'].append(remote.remote_id) return d class IdPRemoteIdsModel(sql.ModelBase, sql.DictBase): __tablename__ = 'idp_remote_ids' attributes = ['idp_id', 'remote_id'] mutable_attributes = frozenset(['idp_id', 'remote_id']) idp_id = sql.Column(sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE')) remote_id = sql.Column(sql.String(255), primary_key=True) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class MappingModel(sql.ModelBase, sql.DictBase): __tablename__ = 'mapping' attributes = ['id', 'rules'] id = sql.Column(sql.String(64), primary_key=True) rules = sql.Column(sql.JsonBlob(), nullable=False) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() new_dictionary['rules'] = jsonutils.dumps(new_dictionary['rules']) return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) d['rules'] = jsonutils.loads(d['rules']) return d class ServiceProviderModel(sql.ModelBase, sql.DictBase): __tablename__ = 'service_provider' attributes = ['auth_url', 'id', 'enabled', 'description', 'relay_state_prefix', 'sp_url'] mutable_attributes = frozenset(['auth_url', 'description', 'enabled', 'relay_state_prefix', 'sp_url']) id = sql.Column(sql.String(64), primary_key=True) enabled = sql.Column(sql.Boolean, nullable=False) description = sql.Column(sql.Text(), nullable=True) auth_url = sql.Column(sql.String(256), nullable=False) sp_url = sql.Column(sql.String(256), 
nullable=False) relay_state_prefix = sql.Column(sql.String(256), nullable=False) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class Federation(core.FederationDriverV9): _CONFLICT_LOG_MSG = 'Conflict %(conflict_type)s: %(details)s' # Identity Provider CRUD def create_idp(self, idp_id, idp): idp['id'] = idp_id try: with sql.session_for_write() as session: idp_ref = IdentityProviderModel.from_dict(idp) session.add(idp_ref) return idp_ref.to_dict() except sql.DBDuplicateEntry as e: conflict_type = 'identity_provider' details = six.text_type(e) LOG.debug(self._CONFLICT_LOG_MSG, {'conflict_type': conflict_type, 'details': details}) if 'remote_id' in details: msg = _('Duplicate remote ID: %s') else: msg = _('Duplicate entry: %s') msg = msg % e.value raise exception.Conflict(type=conflict_type, details=msg) def delete_idp(self, idp_id): with sql.session_for_write() as session: self._delete_assigned_protocols(session, idp_id) idp_ref = self._get_idp(session, idp_id) session.delete(idp_ref) def _get_idp(self, session, idp_id): idp_ref = session.query(IdentityProviderModel).get(idp_id) if not idp_ref: raise exception.IdentityProviderNotFound(idp_id=idp_id) return idp_ref def _get_idp_from_remote_id(self, session, remote_id): q = session.query(IdPRemoteIdsModel) q = q.filter_by(remote_id=remote_id) try: return q.one() except sql.NotFound: raise exception.IdentityProviderNotFound(idp_id=remote_id) def list_idps(self, hints=None): with sql.session_for_read() as session: query = session.query(IdentityProviderModel) idps = sql.filter_limit_query(IdentityProviderModel, query, hints) idps_list = [idp.to_dict() for idp in idps] return idps_list def get_idp(self, idp_id): with sql.session_for_read() as session: idp_ref = self._get_idp(session, idp_id) return 
idp_ref.to_dict() def get_idp_from_remote_id(self, remote_id): with sql.session_for_read() as session: ref = self._get_idp_from_remote_id(session, remote_id) return ref.to_dict() def update_idp(self, idp_id, idp): with sql.session_for_write() as session: idp_ref = self._get_idp(session, idp_id) old_idp = idp_ref.to_dict() old_idp.update(idp) new_idp = IdentityProviderModel.from_dict(old_idp) for attr in IdentityProviderModel.mutable_attributes: setattr(idp_ref, attr, getattr(new_idp, attr)) return idp_ref.to_dict() # Protocol CRUD def _get_protocol(self, session, idp_id, protocol_id): q = session.query(FederationProtocolModel) q = q.filter_by(id=protocol_id, idp_id=idp_id) try: return q.one() except sql.NotFound: kwargs = {'protocol_id': protocol_id, 'idp_id': idp_id} raise exception.FederatedProtocolNotFound(**kwargs) @sql.handle_conflicts(conflict_type='federation_protocol') def create_protocol(self, idp_id, protocol_id, protocol): protocol['id'] = protocol_id protocol['idp_id'] = idp_id with sql.session_for_write() as session: self._get_idp(session, idp_id) protocol_ref = FederationProtocolModel.from_dict(protocol) session.add(protocol_ref) return protocol_ref.to_dict() def update_protocol(self, idp_id, protocol_id, protocol): with sql.session_for_write() as session: proto_ref = self._get_protocol(session, idp_id, protocol_id) old_proto = proto_ref.to_dict() old_proto.update(protocol) new_proto = FederationProtocolModel.from_dict(old_proto) for attr in FederationProtocolModel.mutable_attributes: setattr(proto_ref, attr, getattr(new_proto, attr)) return proto_ref.to_dict() def get_protocol(self, idp_id, protocol_id): with sql.session_for_read() as session: protocol_ref = self._get_protocol(session, idp_id, protocol_id) return protocol_ref.to_dict() def list_protocols(self, idp_id): with sql.session_for_read() as session: q = session.query(FederationProtocolModel) q = q.filter_by(idp_id=idp_id) protocols = [protocol.to_dict() for protocol in q] return protocols 
def delete_protocol(self, idp_id, protocol_id): with sql.session_for_write() as session: key_ref = self._get_protocol(session, idp_id, protocol_id) session.delete(key_ref) def _delete_assigned_protocols(self, session, idp_id): query = session.query(FederationProtocolModel) query = query.filter_by(idp_id=idp_id) query.delete() # Mapping CRUD def _get_mapping(self, session, mapping_id): mapping_ref = session.query(MappingModel).get(mapping_id) if not mapping_ref: raise exception.MappingNotFound(mapping_id=mapping_id) return mapping_ref @sql.handle_conflicts(conflict_type='mapping') def create_mapping(self, mapping_id, mapping): ref = {} ref['id'] = mapping_id ref['rules'] = mapping.get('rules') with sql.session_for_write() as session: mapping_ref = MappingModel.from_dict(ref) session.add(mapping_ref) return mapping_ref.to_dict() def delete_mapping(self, mapping_id): with sql.session_for_write() as session: mapping_ref = self._get_mapping(session, mapping_id) session.delete(mapping_ref) def list_mappings(self): with sql.session_for_read() as session: mappings = session.query(MappingModel) return [x.to_dict() for x in mappings] def get_mapping(self, mapping_id): with sql.session_for_read() as session: mapping_ref = self._get_mapping(session, mapping_id) return mapping_ref.to_dict() @sql.handle_conflicts(conflict_type='mapping') def update_mapping(self, mapping_id, mapping): ref = {} ref['id'] = mapping_id ref['rules'] = mapping.get('rules') with sql.session_for_write() as session: mapping_ref = self._get_mapping(session, mapping_id) old_mapping = mapping_ref.to_dict() old_mapping.update(ref) new_mapping = MappingModel.from_dict(old_mapping) for attr in MappingModel.attributes: setattr(mapping_ref, attr, getattr(new_mapping, attr)) return mapping_ref.to_dict() def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id): with sql.session_for_read() as session: protocol_ref = self._get_protocol(session, idp_id, protocol_id) mapping_id = protocol_ref.mapping_id 
mapping_ref = self._get_mapping(session, mapping_id) return mapping_ref.to_dict() # Service Provider CRUD @sql.handle_conflicts(conflict_type='service_provider') def create_sp(self, sp_id, sp): sp['id'] = sp_id with sql.session_for_write() as session: sp_ref = ServiceProviderModel.from_dict(sp) session.add(sp_ref) return sp_ref.to_dict() def delete_sp(self, sp_id): with sql.session_for_write() as session: sp_ref = self._get_sp(session, sp_id) session.delete(sp_ref) def _get_sp(self, session, sp_id): sp_ref = session.query(ServiceProviderModel).get(sp_id) if not sp_ref: raise exception.ServiceProviderNotFound(sp_id=sp_id) return sp_ref def list_sps(self, hints=None): with sql.session_for_read() as session: query = session.query(ServiceProviderModel) sps = sql.filter_limit_query(ServiceProviderModel, query, hints) sps_list = [sp.to_dict() for sp in sps] return sps_list def get_sp(self, sp_id): with sql.session_for_read() as session: sp_ref = self._get_sp(session, sp_id) return sp_ref.to_dict() def update_sp(self, sp_id, sp): with sql.session_for_write() as session: sp_ref = self._get_sp(session, sp_id) old_sp = sp_ref.to_dict() old_sp.update(sp) new_sp = ServiceProviderModel.from_dict(old_sp) for attr in ServiceProviderModel.mutable_attributes: setattr(sp_ref, attr, getattr(new_sp, attr)) return sp_ref.to_dict() def get_enabled_service_providers(self): with sql.session_for_read() as session: service_providers = session.query(ServiceProviderModel) service_providers = service_providers.filter_by(enabled=True) return service_providers keystone-9.0.0/keystone/federation/constants.py0000664000567000056710000000123112701407102023060 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. FEDERATION = 'OS-FEDERATION' IDENTITY_PROVIDER = 'OS-FEDERATION:identity_provider' PROTOCOL = 'OS-FEDERATION:protocol' keystone-9.0.0/keystone/federation/utils.py0000664000567000056710000007467212701407102022227 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utilities for Federation Extension.""" import ast import re import jsonschema from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six from keystone import exception from keystone.i18n import _, _LW CONF = cfg.CONF LOG = log.getLogger(__name__) class UserType(object): """User mapping type.""" EPHEMERAL = 'ephemeral' LOCAL = 'local' MAPPING_SCHEMA = { "type": "object", "required": ['rules'], "properties": { "rules": { "minItems": 1, "type": "array", "items": { "type": "object", "required": ['local', 'remote'], "additionalProperties": False, "properties": { "local": { "type": "array", "items": { "type": "object", "additionalProperties": False, "properties": { "user": { "type": "object", "properties": { "id": {"type": "string"}, "name": {"type": "string"}, "email": {"type": "string"}, "domain": { "type": "object", "properties": { "id": {"type": "string"}, "name": {"type": "string"} }, "additionalProperties": False, }, "type": { "type": "string", "enum": [UserType.EPHEMERAL, UserType.LOCAL] } }, "additionalProperties": False }, "group": { "type": "object", "properties": { "id": {"type": "string"}, "name": {"type": "string"} }, "additionalProperties": False, }, "groups": { "type": "string" }, "group_ids": { "type": "string" }, "domain": { "type": "object", "properties": { "id": {"type": "string"}, "name": {"type": "string"} }, "additionalProperties": False } } } }, "remote": { "minItems": 1, "type": "array", "items": { "type": "object", "oneOf": [ {"$ref": "#/definitions/empty"}, {"$ref": "#/definitions/any_one_of"}, {"$ref": "#/definitions/not_any_of"}, {"$ref": "#/definitions/blacklist"}, {"$ref": "#/definitions/whitelist"} ], } } } } } }, "definitions": { "empty": { "type": "object", "required": ['type'], "properties": { "type": { "type": "string" }, }, "additionalProperties": False, }, "any_one_of": { "type": "object", "additionalProperties": False, "required": ['type', 'any_one_of'], "properties": { "type": { "type": 
"string" }, "any_one_of": { "type": "array" }, "regex": { "type": "boolean" } } }, "not_any_of": { "type": "object", "additionalProperties": False, "required": ['type', 'not_any_of'], "properties": { "type": { "type": "string" }, "not_any_of": { "type": "array" }, "regex": { "type": "boolean" } } }, "blacklist": { "type": "object", "additionalProperties": False, "required": ['type', 'blacklist'], "properties": { "type": { "type": "string" }, "blacklist": { "type": "array" } } }, "whitelist": { "type": "object", "additionalProperties": False, "required": ['type', 'whitelist'], "properties": { "type": { "type": "string" }, "whitelist": { "type": "array" } } } } } class DirectMaps(object): """An abstraction around the remote matches. Each match is treated internally as a list. """ def __init__(self): self._matches = [] def add(self, values): """Adds a matched value to the list of matches. :param list value: the match to save """ self._matches.append(values) def __getitem__(self, idx): """Used by Python when executing ``''.format(*DirectMaps())``.""" value = self._matches[idx] if isinstance(value, list) and len(value) == 1: return value[0] else: return value def validate_mapping_structure(ref): v = jsonschema.Draft4Validator(MAPPING_SCHEMA) messages = '' for error in sorted(v.iter_errors(ref), key=str): messages = messages + error.message + "\n" if messages: raise exception.ValidationError(messages) def validate_expiration(token_ref): if timeutils.utcnow() > token_ref.expires: raise exception.Unauthorized(_('Federation token is expired')) def validate_groups_cardinality(group_ids, mapping_id): """Check if groups list is non-empty. 
:param group_ids: list of group ids :type group_ids: list of str :raises keystone.exception.MissingGroups: if ``group_ids`` cardinality is 0 """ if not group_ids: raise exception.MissingGroups(mapping_id=mapping_id) def get_remote_id_parameter(protocol): # NOTE(marco-fargetta): Since we support any protocol ID, we attempt to # retrieve the remote_id_attribute of the protocol ID. If it's not # registered in the config, then register the option and try again. # This allows the user to register protocols other than oidc and saml2. remote_id_parameter = None try: remote_id_parameter = CONF[protocol]['remote_id_attribute'] except AttributeError: CONF.register_opt(cfg.StrOpt('remote_id_attribute'), group=protocol) try: remote_id_parameter = CONF[protocol]['remote_id_attribute'] except AttributeError: # nosec # No remote ID attr, will be logged and use the default instead. pass if not remote_id_parameter: LOG.debug('Cannot find "remote_id_attribute" in configuration ' 'group %s. Trying default location in ' 'group federation.', protocol) remote_id_parameter = CONF.federation.remote_id_attribute return remote_id_parameter def validate_idp(idp, protocol, assertion): """The IdP providing the assertion should be registered for the mapping.""" remote_id_parameter = get_remote_id_parameter(protocol) if not remote_id_parameter or not idp['remote_ids']: LOG.debug('Impossible to identify the IdP %s ', idp['id']) # If nothing is defined, the administrator may want to # allow the mapping of every IdP return try: idp_remote_identifier = assertion[remote_id_parameter] except KeyError: msg = _('Could not find Identity Provider identifier in ' 'environment') raise exception.ValidationError(msg) if idp_remote_identifier not in idp['remote_ids']: msg = _('Incoming identity provider identifier not included ' 'among the accepted identifiers.') raise exception.Forbidden(msg) def validate_groups_in_backend(group_ids, mapping_id, identity_api): """Iterate over group ids and make sure they are 
present in the backend. This call is not transactional. :param group_ids: IDs of the groups to be checked :type group_ids: list of str :param mapping_id: id of the mapping used for this operation :type mapping_id: str :param identity_api: Identity Manager object used for communication with backend :type identity_api: identity.Manager :raises keystone.exception.MappedGroupNotFound: If the group returned by mapping was not found in the backend. """ for group_id in group_ids: try: identity_api.get_group(group_id) except exception.GroupNotFound: raise exception.MappedGroupNotFound( group_id=group_id, mapping_id=mapping_id) def validate_groups(group_ids, mapping_id, identity_api): """Check group ids cardinality and check their existence in the backend. This call is not transactional. :param group_ids: IDs of the groups to be checked :type group_ids: list of str :param mapping_id: id of the mapping used for this operation :type mapping_id: str :param identity_api: Identity Manager object used for communication with backend :type identity_api: identity.Manager :raises keystone.exception.MappedGroupNotFound: If the group returned by mapping was not found in the backend. :raises keystone.exception.MissingGroups: If ``group_ids`` cardinality is 0. """ validate_groups_cardinality(group_ids, mapping_id) validate_groups_in_backend(group_ids, mapping_id, identity_api) # TODO(marek-denis): Optimize this function, so the number of calls to the # backend are minimized. def transform_to_group_ids(group_names, mapping_id, identity_api, resource_api): """Transform groups identified by name/domain to their ids Function accepts list of groups identified by a name and domain giving a list of group ids in return. Example of group_names parameter:: [ { "name": "group_name", "domain": { "id": "domain_id" }, }, { "name": "group_name_2", "domain": { "name": "domain_name" } } ] :param group_names: list of group identified by name and its domain. 
:type group_names: list :param mapping_id: id of the mapping used for mapping assertion into local credentials :type mapping_id: str :param identity_api: identity_api object :param resource_api: resource manager object :returns: generator object with group ids :raises keystone.exception.MappedGroupNotFound: in case asked group doesn't exist in the backend. """ def resolve_domain(domain): """Return domain id. Input is a dictionary with a domain identified either by a ``id`` or a ``name``. In the latter case system will attempt to fetch domain object from the backend. :returns: domain's id :rtype: str """ domain_id = (domain.get('id') or resource_api.get_domain_by_name( domain.get('name')).get('id')) return domain_id for group in group_names: try: group_dict = identity_api.get_group_by_name( group['name'], resolve_domain(group['domain'])) yield group_dict['id'] except exception.GroupNotFound: LOG.debug('Skip mapping group %s; has no entry in the backend', group['name']) def get_assertion_params_from_env(context): LOG.debug('Environment variables: %s', context['environment']) prefix = CONF.federation.assertion_prefix for k, v in list(context['environment'].items()): if not k.startswith(prefix): continue # These bytes may be decodable as ISO-8859-1 according to Section # 3.2.4 of RFC 7230. Let's assume that our web server plugins are # correctly encoding the data. if not isinstance(v, six.text_type) and getattr(v, 'decode', False): v = v.decode('ISO-8859-1') yield (k, v) class RuleProcessor(object): """A class to process assertions and mapping rules.""" class _EvalType(object): """Mapping rule evaluation types.""" ANY_ONE_OF = 'any_one_of' NOT_ANY_OF = 'not_any_of' BLACKLIST = 'blacklist' WHITELIST = 'whitelist' def __init__(self, mapping_id, rules): """Initialize RuleProcessor. 
Example rules can be found at: :class:`keystone.tests.mapping_fixtures` :param mapping_id: id for the mapping :type mapping_id: string :param rules: rules from a mapping :type rules: dict """ self.mapping_id = mapping_id self.rules = rules def process(self, assertion_data): """Transform assertion to a dictionary. The dictionary contains mapping of user name and group ids based on mapping rules. This function will iterate through the mapping rules to find assertions that are valid. :param assertion_data: an assertion containing values from an IdP :type assertion_data: dict Example assertion_data:: { 'Email': 'testacct@example.com', 'UserName': 'testacct', 'FirstName': 'Test', 'LastName': 'Account', 'orgPersonType': 'Tester' } :returns: dictionary with user and group_ids The expected return structure is:: { 'name': 'foobar', 'group_ids': ['abc123', 'def456'], 'group_names': [ { 'name': 'group_name_1', 'domain': { 'name': 'domain1' } }, { 'name': 'group_name_1_1', 'domain': { 'name': 'domain1' } }, { 'name': 'group_name_2', 'domain': { 'id': 'xyz132' } } ] } """ # Assertions will come in as string key-value pairs, and will use a # semi-colon to indicate multiple values, i.e. groups. # This will create a new dictionary where the values are arrays, and # any multiple values are stored in the arrays. LOG.debug('assertion data: %s', assertion_data) assertion = {n: v.split(';') for n, v in assertion_data.items() if isinstance(v, six.string_types)} LOG.debug('assertion: %s', assertion) identity_values = [] LOG.debug('rules: %s', self.rules) for rule in self.rules: direct_maps = self._verify_all_requirements(rule['remote'], assertion) # If the compare comes back as None, then the rule did not apply # to the assertion data, go on to the next rule if direct_maps is None: continue # If there are no direct mappings, then add the local mapping # directly to the array of saved values. However, if there is # a direct mapping, then perform variable replacement. 
if not direct_maps: identity_values += rule['local'] else: for local in rule['local']: new_local = self._update_local_mapping(local, direct_maps) identity_values.append(new_local) LOG.debug('identity_values: %s', identity_values) mapped_properties = self._transform(identity_values) LOG.debug('mapped_properties: %s', mapped_properties) return mapped_properties def _transform(self, identity_values): """Transform local mappings, to an easier to understand format. Transform the incoming array to generate the return value for the process function. Generating content for Keystone tokens will be easier if some pre-processing is done at this level. :param identity_values: local mapping from valid evaluations :type identity_values: array of dict Example identity_values:: [ { 'group': {'id': '0cd5e9'}, 'user': { 'email': 'bob@example.com' }, }, { 'groups': ['member', 'admin', tester'], 'domain': { 'name': 'default_domain' } }, { 'group_ids': ['abc123', 'def456', '0cd5e9'] } ] :returns: dictionary with user name, group_ids and group_names. 
:rtype: dict """ def extract_groups(groups_by_domain): for groups in list(groups_by_domain.values()): for group in list({g['name']: g for g in groups}.values()): yield group def normalize_user(user): """Parse and validate user mapping.""" user_type = user.get('type') if user_type and user_type not in (UserType.EPHEMERAL, UserType.LOCAL): msg = _("User type %s not supported") % user_type raise exception.ValidationError(msg) if user_type is None: user_type = user['type'] = UserType.EPHEMERAL if user_type == UserType.EPHEMERAL: user['domain'] = { 'id': CONF.federation.federated_domain_name } # initialize the group_ids as a set to eliminate duplicates user = {} group_ids = set() group_names = list() groups_by_domain = dict() # if mapping yield no valid identity values, we should bail right away # instead of continuing on with a normalized bogus user if not identity_values: msg = _("Could not map any federated user properties to identity " "values. Check debug logs or the mapping used for " "additional details.") LOG.warning(msg) raise exception.ValidationError(msg) for identity_value in identity_values: if 'user' in identity_value: # if a mapping outputs more than one user name, log it if user: LOG.warning(_LW('Ignoring user name')) else: user = identity_value.get('user') if 'group' in identity_value: group = identity_value['group'] if 'id' in group: group_ids.add(group['id']) elif 'name' in group: domain = (group['domain'].get('name') or group['domain'].get('id')) groups_by_domain.setdefault(domain, list()).append(group) group_names.extend(extract_groups(groups_by_domain)) if 'groups' in identity_value: if 'domain' not in identity_value: msg = _("Invalid rule: %(identity_value)s. Both 'groups' " "and 'domain' keywords must be specified.") msg = msg % {'identity_value': identity_value} raise exception.ValidationError(msg) # In this case, identity_value['groups'] is a string # representation of a list, and we want a real list. 
This is # due to the way we do direct mapping substitutions today (see # function _update_local_mapping() ) try: group_names_list = ast.literal_eval( identity_value['groups']) except ValueError: group_names_list = [identity_value['groups']] domain = identity_value['domain'] group_dicts = [{'name': name, 'domain': domain} for name in group_names_list] group_names.extend(group_dicts) if 'group_ids' in identity_value: # If identity_values['group_ids'] is a string representation # of a list, parse it to a real list. Also, if the provided # group_ids parameter contains only one element, it will be # parsed as a simple string, and not a list or the # representation of a list. try: group_ids.update( ast.literal_eval(identity_value['group_ids'])) except (ValueError, SyntaxError): group_ids.update([identity_value['group_ids']]) normalize_user(user) return {'user': user, 'group_ids': list(group_ids), 'group_names': group_names} def _update_local_mapping(self, local, direct_maps): """Replace any {0}, {1} ... values with data from the assertion. :param local: local mapping reference that needs to be updated :type local: dict :param direct_maps: identity values used to update local :type direct_maps: keystone.federation.utils.DirectMaps Example local:: {'user': {'name': '{0} {1}', 'email': '{2}'}} Example direct_maps:: ['Bob', 'Thompson', 'bob@example.com'] :returns: new local mapping reference with replaced values. 
The expected return structure is:: {'user': {'name': 'Bob Thompson', 'email': 'bob@example.org'}} :raises keystone.exception.DirectMappingError: when referring to a remote match from a local section of a rule """ LOG.debug('direct_maps: %s', direct_maps) LOG.debug('local: %s', local) new = {} for k, v in local.items(): if isinstance(v, dict): new_value = self._update_local_mapping(v, direct_maps) else: try: new_value = v.format(*direct_maps) except IndexError: raise exception.DirectMappingError( mapping_id=self.mapping_id) new[k] = new_value return new def _verify_all_requirements(self, requirements, assertion): """Compare remote requirements of a rule against the assertion. If a value of ``None`` is returned, the rule with this assertion doesn't apply. If an array of zero length is returned, then there are no direct mappings to be performed, but the rule is valid. Otherwise, then it will first attempt to filter the values according to blacklist or whitelist rules and finally return the values in order, to be directly mapped. 
:param requirements: list of remote requirements from rules :type requirements: list Example requirements:: [ { "type": "UserName" }, { "type": "orgPersonType", "any_one_of": [ "Customer" ] }, { "type": "ADFS_GROUPS", "whitelist": [ "g1", "g2", "g3", "g4" ] } ] :param assertion: dict of attributes from an IdP :type assertion: dict Example assertion:: { 'UserName': ['testacct'], 'LastName': ['Account'], 'orgPersonType': ['Tester'], 'Email': ['testacct@example.com'], 'FirstName': ['Test'], 'ADFS_GROUPS': ['g1', 'g2'] } :returns: identity values used to update local :rtype: keystone.federation.utils.DirectMaps or None """ direct_maps = DirectMaps() for requirement in requirements: requirement_type = requirement['type'] direct_map_values = assertion.get(requirement_type) regex = requirement.get('regex', False) if not direct_map_values: return None any_one_values = requirement.get(self._EvalType.ANY_ONE_OF) if any_one_values is not None: if self._evaluate_requirement(any_one_values, direct_map_values, self._EvalType.ANY_ONE_OF, regex): continue else: return None not_any_values = requirement.get(self._EvalType.NOT_ANY_OF) if not_any_values is not None: if self._evaluate_requirement(not_any_values, direct_map_values, self._EvalType.NOT_ANY_OF, regex): continue else: return None # If 'any_one_of' or 'not_any_of' are not found, then values are # within 'type'. Attempt to find that 'type' within the assertion, # and filter these values if 'whitelist' or 'blacklist' is set. blacklisted_values = requirement.get(self._EvalType.BLACKLIST) whitelisted_values = requirement.get(self._EvalType.WHITELIST) # If a blacklist or whitelist is used, we want to map to the # whole list instead of just its values separately. 
if blacklisted_values is not None: direct_map_values = [v for v in direct_map_values if v not in blacklisted_values] elif whitelisted_values is not None: direct_map_values = [v for v in direct_map_values if v in whitelisted_values] direct_maps.add(direct_map_values) LOG.debug('updating a direct mapping: %s', direct_map_values) return direct_maps def _evaluate_values_by_regex(self, values, assertion_values): for value in values: for assertion_value in assertion_values: if re.search(value, assertion_value): return True return False def _evaluate_requirement(self, values, assertion_values, eval_type, regex): """Evaluate the incoming requirement and assertion. If the requirement type does not exist in the assertion data, then return False. If regex is specified, then compare the values and assertion values. Otherwise, grab the intersection of the values and use that to compare against the evaluation type. :param values: list of allowed values, defined in the requirement :type values: list :param assertion_values: The values from the assertion to evaluate :type assertion_values: list/string :param eval_type: determine how to evaluate requirements :type eval_type: string :param regex: perform evaluation with regex :type regex: boolean :returns: boolean, whether requirement is valid or not. 
""" if regex: any_match = self._evaluate_values_by_regex(values, assertion_values) else: any_match = bool(set(values).intersection(set(assertion_values))) if any_match and eval_type == self._EvalType.ANY_ONE_OF: return True if not any_match and eval_type == self._EvalType.NOT_ANY_OF: return True return False def assert_enabled_identity_provider(federation_api, idp_id): identity_provider = federation_api.get_idp(idp_id) if identity_provider.get('enabled') is not True: msg = _('Identity Provider %(idp)s is disabled') % {'idp': idp_id} LOG.debug(msg) raise exception.Forbidden(msg) def assert_enabled_service_provider_object(service_provider): if service_provider.get('enabled') is not True: sp_id = service_provider['id'] msg = _('Service Provider %(sp)s is disabled') % {'sp': sp_id} LOG.debug(msg) raise exception.Forbidden(msg) keystone-9.0.0/keystone/federation/schema.py0000664000567000056710000000634712701407102022321 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import validation from keystone.common.validation import parameter_types basic_property_id = { 'type': 'object', 'properties': { 'id': { 'type': 'string' } }, 'required': ['id'], 'additionalProperties': False } saml_create = { 'type': 'object', 'properties': { 'identity': { 'type': 'object', 'properties': { 'token': basic_property_id, 'methods': { 'type': 'array' } }, 'required': ['token'], 'additionalProperties': False }, 'scope': { 'type': 'object', 'properties': { 'service_provider': basic_property_id }, 'required': ['service_provider'], 'additionalProperties': False }, }, 'required': ['identity', 'scope'], 'additionalProperties': False } _service_provider_properties = { # NOTE(rodrigods): The database accepts URLs with 256 as max length, # but parameter_types.url uses 225 as max length. 'auth_url': parameter_types.url, 'sp_url': parameter_types.url, 'description': validation.nullable(parameter_types.description), 'enabled': parameter_types.boolean, 'relay_state_prefix': validation.nullable(parameter_types.description) } service_provider_create = { 'type': 'object', 'properties': _service_provider_properties, # NOTE(rodrigods): 'id' is not required since it is passed in the URL 'required': ['auth_url', 'sp_url'], 'additionalProperties': False } service_provider_update = { 'type': 'object', 'properties': _service_provider_properties, # Make sure at least one property is being updated 'minProperties': 1, 'additionalProperties': False } _identity_provider_properties = { 'enabled': parameter_types.boolean, 'description': validation.nullable(parameter_types.description), 'remote_ids': { 'type': ['array', 'null'], 'items': { 'type': 'string' }, 'uniqueItems': True } } identity_provider_create = { 'type': 'object', 'properties': _identity_provider_properties, 'additionalProperties': False } identity_provider_update = { 'type': 'object', 'properties': _identity_provider_properties, # Make sure at least one property is being updated 'minProperties': 
1, 'additionalProperties': False } federation_protocol_schema = { 'type': 'object', 'properties': { 'mapping_id': parameter_types.mapping_id_string }, # `mapping_id` is the property that cannot be ignored 'minProperties': 1, 'additionalProperties': False } keystone-9.0.0/keystone/federation/__init__.py0000664000567000056710000000117112701407102022606 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.federation.core import * # noqa keystone-9.0.0/keystone/federation/core.py0000664000567000056710000004733412701407102022012 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Main entry point into the Federation service.""" import abc from oslo_config import cfg from oslo_log import versionutils import six from keystone.common import dependency from keystone.common import extension from keystone.common import manager from keystone import exception from keystone.federation import utils CONF = cfg.CONF EXTENSION_DATA = { 'name': 'OpenStack Federation APIs', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-FEDERATION/v1.0', 'alias': 'OS-FEDERATION', 'updated': '2013-12-17T12:00:0-00:00', 'description': 'OpenStack Identity Providers Mechanism.', 'links': [{ 'rel': 'describedby', 'type': 'text/html', 'href': 'http://specs.openstack.org/openstack/keystone-specs/api/v3/' 'identity-api-v3-os-federation-ext.html', }]} extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA) extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA) @dependency.provider('federation_api') class Manager(manager.Manager): """Default pivot point for the Federation backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.federation' def __init__(self): super(Manager, self).__init__(CONF.federation.driver) # Make sure it is a driver version we support, and if it is a legacy # driver, then wrap it. if isinstance(self.driver, FederationDriverV8): self.driver = V9FederationWrapperForV8Driver(self.driver) elif not isinstance(self.driver, FederationDriverV9): raise exception.UnsupportedDriverVersion( driver=CONF.federation.driver) def get_enabled_service_providers(self): """List enabled service providers for Service Catalog Service Provider in a catalog contains three attributes: ``id``, ``auth_url``, ``sp_url``, where: - id is a unique, user defined identifier for service provider object - auth_url is an authentication URL of remote Keystone - sp_url a URL accessible at the remote service provider where SAML assertion is transmitted. 
:returns: list of dictionaries with enabled service providers :rtype: list of dicts """ def normalize(sp): ref = { 'auth_url': sp.auth_url, 'id': sp.id, 'sp_url': sp.sp_url } return ref service_providers = self.driver.get_enabled_service_providers() return [normalize(sp) for sp in service_providers] def evaluate(self, idp_id, protocol_id, assertion_data): mapping = self.get_mapping_from_idp_and_protocol(idp_id, protocol_id) rules = mapping['rules'] rule_processor = utils.RuleProcessor(mapping['id'], rules) mapped_properties = rule_processor.process(assertion_data) return mapped_properties, mapping['id'] # The FederationDriverBase class is the set of driver methods from earlier # drivers that we still support, that have not been removed or modified. This # class is then used to created the augmented V8 and V9 version abstract driver # classes, without having to duplicate a lot of abstract method signatures. # If you remove a method from V9, then move the abstract methods from this Base # class to the V8 class. Do not modify any of the method signatures in the Base # class - changes should only be made in the V8 and subsequent classes. @six.add_metaclass(abc.ABCMeta) class FederationDriverBase(object): @abc.abstractmethod def create_idp(self, idp_id, idp): """Create an identity provider. :param idp_id: ID of IdP object :type idp_id: string :param idp: idp object :type idp: dict :returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_idp(self, idp_id): """Delete an identity provider. :param idp_id: ID of IdP object :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_idp(self, idp_id): """Get an identity provider by ID. :param idp_id: ID of IdP object :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. 
:returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_idp_from_remote_id(self, remote_id): """Get an identity provider by remote ID. :param remote_id: ID of remote IdP :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_idp(self, idp_id, idp): """Update an identity provider by ID. :param idp_id: ID of IdP object :type idp_id: string :param idp: idp object :type idp: dict :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_protocol(self, idp_id, protocol_id, protocol): """Add an IdP-Protocol configuration. :param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :param protocol: protocol object :type protocol: dict :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: protocol ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_protocol(self, idp_id, protocol_id, protocol): """Change an IdP-Protocol configuration. :param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :param protocol: protocol object :type protocol: dict :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. :returns: protocol ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_protocol(self, idp_id, protocol_id): """Get an IdP-Protocol configuration. 
:param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. :returns: protocol ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_protocols(self, idp_id): """List an IdP's supported protocols. :param idp_id: ID of IdP object :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: list of protocol ref :rtype: list of dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_protocol(self, idp_id, protocol_id): """Delete an IdP-Protocol configuration. :param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_mapping(self, mapping_id, mapping): """Create a mapping. :param mapping_id: ID of mapping object :type mapping_id: string :param mapping: mapping ref with mapping name :type mapping: dict :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_mapping(self, mapping_id): """Delete a mapping. :param mapping_id: id of mapping to delete :type mapping_ref: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_mapping(self, mapping_id, mapping_ref): """Update a mapping. 
:param mapping_id: id of mapping to update :type mapping_id: string :param mapping_ref: new mapping ref :type mapping_ref: dict :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_mappings(self): """List all mappings. :returns: list of mapping refs :rtype: list of dicts """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_mapping(self, mapping_id): """Get a mapping, returns the mapping based on mapping_id. :param mapping_id: id of mapping to get :type mapping_ref: string :raises keystone.exception.MappingNotFound: If the mapping cannot be found. :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id): """Get mapping based on idp_id and protocol_id. :param idp_id: id of the identity provider :type idp_id: string :param protocol_id: id of the protocol :type protocol_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_sp(self, sp_id, sp): """Create a service provider. :param sp_id: id of the service provider :type sp_id: string :param sp: service prvider object :type sp: dict :returns: service provider ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_sp(self, sp_id): """Delete a service provider. :param sp_id: id of the service provider :type sp_id: string :raises keystone.exception.ServiceProviderNotFound: If the service provider doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_sp(self, sp_id): """Get a service provider. 
        :param sp_id: id of the service provider
        :type sp_id: string
        :returns: service provider ref
        :rtype: dict
        :raises keystone.exception.ServiceProviderNotFound: If the service
            provider doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def update_sp(self, sp_id, sp):
        """Update a service provider.

        :param sp_id: id of the service provider
        :type sp_id: string
        :param sp: service provider object
        :type sp: dict
        :returns: service provider ref
        :rtype: dict
        :raises keystone.exception.ServiceProviderNotFound: If the service
            provider doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    def get_enabled_service_providers(self):
        """List enabled service providers for Service Catalog

        Service Provider in a catalog contains three attributes: ``id``,
        ``auth_url``, ``sp_url``, where:

        - id is a unique, user defined identifier for service provider object
        - auth_url is an authentication URL of remote Keystone
        - sp_url a URL accessible at the remote service provider where SAML
          assertion is transmitted.

        :returns: list of dictionaries with enabled service providers
        :rtype: list of dicts

        """
        raise exception.NotImplemented()  # pragma: no cover


class FederationDriverV8(FederationDriverBase):
    """Removed or redefined methods from V8.

    Move the abstract methods of any methods removed or modified in later
    versions of the driver from FederationDriverBase to here. We maintain
    this so that legacy drivers, which will be a subclass of
    FederationDriverV8, can still reference them.

    """

    @abc.abstractmethod
    def list_idps(self):
        """List all identity providers.

        :returns: list of idp refs
        :rtype: list of dicts

        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_sps(self):
        """List all service providers.

        :returns: List of service provider ref objects
        :rtype: list of dicts

        """
        raise exception.NotImplemented()  # pragma: no cover


class FederationDriverV9(FederationDriverBase):
    """New or redefined methods from V8.

    Add any new V9 abstract methods (or those with modified signatures) to
    this class.

    """

    @abc.abstractmethod
    def list_idps(self, hints):
        """List all identity providers.

        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: list of idp refs
        :rtype: list of dicts

        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_sps(self, hints):
        """List all service providers.

        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: List of service provider ref objects
        :rtype: list of dicts

        :raises keystone.exception.ServiceProviderNotFound: If the SP
            doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover


class V9FederationWrapperForV8Driver(FederationDriverV9):
    """Wrapper class to support a V8 legacy driver.

    In order to support legacy drivers without having to make the manager
    code driver-version aware, we wrap legacy drivers so that they look like
    the latest version. For the various changes made in a new driver, here
    are the actions needed in this wrapper:

    Method removed from new driver - remove the call-through method from this
    class, since the manager will no longer be calling it.

    Method signature (or meaning) changed - wrap the old method in a new
    signature here, and munge the input and output parameters accordingly.

    New method added to new driver - add a method to implement the new
    functionality here if possible. If that is not possible, then return
    NotImplemented, since we do not guarantee to support new functionality
    with legacy drivers.

    """

    @versionutils.deprecated(
        as_of=versionutils.deprecated.MITAKA,
        what='keystone.federation.FederationDriverV8',
        in_favor_of='keystone.federation.FederationDriverV9',
        remove_in=+2)
    def __init__(self, wrapped_driver):
        self.driver = wrapped_driver

    def create_idp(self, idp_id, idp):
        return self.driver.create_idp(idp_id, idp)

    def delete_idp(self, idp_id):
        self.driver.delete_idp(idp_id)

    # NOTE(davechen): The hints is ignored here to support legacy drivers,
    # but the filters in hints will be remain unsatisfied and V3Controller
    # wrapper will apply these filters at the end. So that the result get
    # returned for list IdP will still be filtered with the legacy drivers.
    def list_idps(self, hints):
        return self.driver.list_idps()

    def get_idp(self, idp_id):
        return self.driver.get_idp(idp_id)

    def get_idp_from_remote_id(self, remote_id):
        return self.driver.get_idp_from_remote_id(remote_id)

    def update_idp(self, idp_id, idp):
        return self.driver.update_idp(idp_id, idp)

    def create_protocol(self, idp_id, protocol_id, protocol):
        return self.driver.create_protocol(idp_id, protocol_id, protocol)

    def update_protocol(self, idp_id, protocol_id, protocol):
        return self.driver.update_protocol(idp_id, protocol_id, protocol)

    def get_protocol(self, idp_id, protocol_id):
        return self.driver.get_protocol(idp_id, protocol_id)

    def list_protocols(self, idp_id):
        return self.driver.list_protocols(idp_id)

    def delete_protocol(self, idp_id, protocol_id):
        self.driver.delete_protocol(idp_id, protocol_id)

    def create_mapping(self, mapping_id, mapping):
        return self.driver.create_mapping(mapping_id, mapping)

    def delete_mapping(self, mapping_id):
        self.driver.delete_mapping(mapping_id)

    def update_mapping(self, mapping_id, mapping_ref):
        return self.driver.update_mapping(mapping_id, mapping_ref)

    def list_mappings(self):
        return self.driver.list_mappings()

    def get_mapping(self, mapping_id):
        return self.driver.get_mapping(mapping_id)

    def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id):
        return self.driver.get_mapping_from_idp_and_protocol(
            idp_id, protocol_id)

    def create_sp(self, sp_id, sp):
        return self.driver.create_sp(sp_id, sp)

    def delete_sp(self, sp_id):
        self.driver.delete_sp(sp_id)

    # NOTE(davechen): The hints is ignored here to support legacy drivers,
    # but the filters in hints will be remain unsatisfied and V3Controller
    # wrapper will apply these filters at the end. So that the result get
    # returned for list SPs will still be filtered with the legacy drivers.
    def list_sps(self, hints):
        return self.driver.list_sps()

    def get_sp(self, sp_id):
        return self.driver.get_sp(sp_id)

    def update_sp(self, sp_id, sp):
        return self.driver.update_sp(sp_id, sp)

    def get_enabled_service_providers(self):
        return self.driver.get_enabled_service_providers()


Driver = manager.create_legacy_driver(FederationDriverV8)
keystone-9.0.0/keystone/federation/idp.py0000664000567000056710000005711012701407102021627 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import os
import uuid

from oslo_config import cfg
from oslo_log import log
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import timeutils
import saml2
from saml2 import client_base
from saml2 import md
from saml2.profile import ecp
from saml2 import saml
from saml2 import samlp
from saml2.schema import soapenv
from saml2 import sigver
xmldsig = importutils.try_import("saml2.xmldsig")
if not xmldsig:
    xmldsig = importutils.try_import("xmldsig")

from keystone.common import environment
from keystone.common import utils
from keystone import exception
from keystone.i18n import _, _LE


LOG = log.getLogger(__name__)
CONF = cfg.CONF


class SAMLGenerator(object):
    """A class to generate SAML assertions."""

    def __init__(self):
        # Unique ID used both as the Assertion's ID attribute and as the
        # signature Reference URI, so xmlsec1 signs this exact assertion.
        self.assertion_id = uuid.uuid4().hex

    def samlize_token(self, issuer, recipient, user, user_domain_name, roles,
                      project, project_domain_name, expires_in=None):
        """Convert Keystone attributes to a SAML assertion.

        :param issuer: URL of the issuing party
        :type issuer: string
        :param recipient: URL of the recipient
        :type recipient: string
        :param user: User name
        :type user: string
        :param user_domain_name: User Domain name
        :type user_domain_name: string
        :param roles: List of role names
        :type roles: list
        :param project: Project name
        :type project: string
        :param project_domain_name: Project Domain name
        :type project_domain_name: string
        :param expires_in: Sets how long the assertion is valid for, in
                           seconds
        :type expires_in: int

        :returns: XML <Response> object

        """
        expiration_time = self._determine_expiration_time(expires_in)
        status = self._create_status()
        saml_issuer = self._create_issuer(issuer)
        subject = self._create_subject(user, expiration_time, recipient)
        attribute_statement = self._create_attribute_statement(
            user, user_domain_name, roles, project, project_domain_name)
        authn_statement = self._create_authn_statement(issuer,
                                                       expiration_time)
        signature = self._create_signature()

        assertion = self._create_assertion(saml_issuer, signature,
                                           subject, authn_statement,
                                           attribute_statement)

        # Sign the assertion before wrapping it in the response so the
        # signature covers the final assertion bytes.
        assertion = _sign_assertion(assertion)

        response = self._create_response(saml_issuer, status, assertion,
                                         recipient)
        return response

    def _determine_expiration_time(self, expires_in):
        # Fall back to the configured default lifetime when the caller did
        # not supply one; returns an ISO 8601 timestamp with subseconds.
        if expires_in is None:
            expires_in = CONF.saml.assertion_expiration_time
        now = timeutils.utcnow()
        future = now + datetime.timedelta(seconds=expires_in)
        return utils.isotime(future, subsecond=True)

    def _create_status(self):
        """Create an object that represents a SAML Status.

        The status is always ``Success``.

        :returns: XML <Status> object

        """
        status = samlp.Status()
        status_code = samlp.StatusCode()
        status_code.value = samlp.STATUS_SUCCESS
        status_code.set_text('')
        status.status_code = status_code
        return status

    def _create_issuer(self, issuer_url):
        """Create an object that represents a SAML Issuer.

        Example issuer value: https://acme.com/FIM/sps/openstack/saml20

        :returns: XML <Issuer> object

        """
        issuer = saml.Issuer()
        issuer.format = saml.NAMEID_FORMAT_ENTITY
        issuer.set_text(issuer_url)
        return issuer

    def _create_subject(self, user, expiration_time, recipient):
        """Create an object that represents a SAML Subject.

        Contains a NameID with the user name (e.g. john@smith.com) and a
        bearer SubjectConfirmation bounded by ``expiration_time``.

        :returns: XML <Subject> object

        """
        name_id = saml.NameID()
        name_id.set_text(user)
        subject_conf_data = saml.SubjectConfirmationData()
        subject_conf_data.recipient = recipient
        subject_conf_data.not_on_or_after = expiration_time
        subject_conf = saml.SubjectConfirmation()
        subject_conf.method = saml.SCM_BEARER
        subject_conf.subject_confirmation_data = subject_conf_data
        subject = saml.Subject()
        subject.subject_confirmation = subject_conf
        subject.name_id = name_id
        return subject

    def _create_attribute_statement(self, user, user_domain_name, roles,
                                    project, project_domain_name):
        """Create an object that represents a SAML AttributeStatement.

        Emits the openstack_user, openstack_roles, openstack_project,
        openstack_project_domain and openstack_user_domain attributes.

        :returns: XML <AttributeStatement> object

        """
        def _build_attribute(attribute_name, attribute_values):
            attribute = saml.Attribute()
            attribute.name = attribute_name

            for value in attribute_values:
                attribute_value = saml.AttributeValue()
                attribute_value.set_text(value)
                attribute.attribute_value.append(attribute_value)

            return attribute

        user_attribute = _build_attribute('openstack_user', [user])
        roles_attribute = _build_attribute('openstack_roles', roles)
        project_attribute = _build_attribute('openstack_project', [project])
        project_domain_attribute = _build_attribute(
            'openstack_project_domain', [project_domain_name])
        user_domain_attribute = _build_attribute(
            'openstack_user_domain', [user_domain_name])

        attribute_statement = saml.AttributeStatement()
        attribute_statement.attribute.append(user_attribute)
        attribute_statement.attribute.append(roles_attribute)
        attribute_statement.attribute.append(project_attribute)
        attribute_statement.attribute.append(project_domain_attribute)
        attribute_statement.attribute.append(user_domain_attribute)
        return attribute_statement

    def _create_authn_statement(self, issuer, expiration_time):
        """Create an object that represents a SAML AuthnStatement.

        Uses the Password authentication context class and records the
        issuer as the authenticating authority.

        :returns: XML <AuthnStatement> object

        """
        authn_statement = saml.AuthnStatement()
        authn_statement.authn_instant = utils.isotime()
        authn_statement.session_index = uuid.uuid4().hex
        authn_statement.session_not_on_or_after = expiration_time

        authn_context = saml.AuthnContext()
        authn_context_class = saml.AuthnContextClassRef()
        authn_context_class.set_text(saml.AUTHN_PASSWORD)

        authn_authority = saml.AuthenticatingAuthority()
        authn_authority.set_text(issuer)

        authn_context.authn_context_class_ref = authn_context_class
        authn_context.authenticating_authority = authn_authority

        authn_statement.authn_context = authn_context

        return authn_statement

    def _create_assertion(self, issuer, signature, subject, authn_statement,
                          attribute_statement):
        """Create an object that represents a SAML Assertion.

        Aggregates the issuer, signature template, subject, authn statement
        and attribute statement built by the other helpers.

        :returns: XML <Assertion> object

        """
        assertion = saml.Assertion()
        assertion.id = self.assertion_id
        assertion.issue_instant = utils.isotime()
        assertion.version = '2.0'
        assertion.issuer = issuer
        assertion.signature = signature
        assertion.subject = subject
        assertion.authn_statement = authn_statement
        assertion.attribute_statement = attribute_statement
        return assertion

    def _create_response(self, issuer, status, assertion, recipient):
        """Create an object that represents a SAML Response.

        Wraps the (already signed) assertion together with the status and
        issuer.

        :returns: XML <Response> object

        """
        response = samlp.Response()
        response.id = uuid.uuid4().hex
        response.destination = recipient
        response.issue_instant = utils.isotime()
        response.version = '2.0'
        response.issuer = issuer
        response.status = status
        response.assertion = assertion
        return response

    def _create_signature(self):
        """Create an object that represents a SAML <Signature>.

        This must be filled with algorithms that the signing binary will
        apply in order to sign the whole message.
        Currently we enforce X509 signing.

        NOTE: RSA-SHA1 and SHA1 digests are used here; SHA1 is considered
        weak, but the algorithms are part of the wire format consumed by
        peers and cannot be changed unilaterally without breaking
        interoperability with existing Service Providers.

        :returns: XML <Signature> object

        """
        canonicalization_method = xmldsig.CanonicalizationMethod()
        canonicalization_method.algorithm = xmldsig.ALG_EXC_C14N
        signature_method = xmldsig.SignatureMethod(
            algorithm=xmldsig.SIG_RSA_SHA1)

        transforms = xmldsig.Transforms()
        envelope_transform = xmldsig.Transform(
            algorithm=xmldsig.TRANSFORM_ENVELOPED)

        c14_transform = xmldsig.Transform(algorithm=xmldsig.ALG_EXC_C14N)
        transforms.transform = [envelope_transform, c14_transform]

        digest_method = xmldsig.DigestMethod(algorithm=xmldsig.DIGEST_SHA1)
        digest_value = xmldsig.DigestValue()

        reference = xmldsig.Reference()
        # The reference points back at the assertion via its ID attribute.
        reference.uri = '#' + self.assertion_id
        reference.digest_method = digest_method
        reference.digest_value = digest_value
        reference.transforms = transforms

        signed_info = xmldsig.SignedInfo()
        signed_info.canonicalization_method = canonicalization_method
        signed_info.signature_method = signature_method
        signed_info.reference = reference

        key_info = xmldsig.KeyInfo()
        key_info.x509_data = xmldsig.X509Data()

        signature = xmldsig.Signature()
        signature.signed_info = signed_info
        signature.signature_value = xmldsig.SignatureValue()
        signature.key_info = key_info

        return signature


def _sign_assertion(assertion):
    """Sign a SAML assertion.

    This method utilizes ``xmlsec1`` binary and signs SAML assertions in a
    separate process. ``xmlsec1`` cannot read input data from stdin so the
    prepared assertion needs to be serialized and stored in a temporary
    file. This file will be deleted immediately after ``xmlsec1`` returns.
    The signed assertion is redirected to a standard output and read using
    subprocess.PIPE redirection. A ``saml.Assertion`` class is created
    from the signed string again and returned.

    Parameters that are required in the CONF::
    * xmlsec_binary
    * private key file path
    * public key file path

    :returns: XML <Assertion> object

    """
    xmlsec_binary = CONF.saml.xmlsec1_binary
    idp_private_key = CONF.saml.keyfile
    idp_public_key = CONF.saml.certfile

    # xmlsec1 --sign --privkey-pem privkey,cert --id-attr:ID <tag> <file>
    certificates = '%(idp_private_key)s,%(idp_public_key)s' % {
        'idp_public_key': idp_public_key,
        'idp_private_key': idp_private_key
    }

    command_list = [xmlsec_binary, '--sign', '--privkey-pem', certificates,
                    '--id-attr:ID', 'Assertion']

    file_path = None
    try:
        # NOTE(gyee): need to make the namespace prefixes explicit so
        # they won't get reassigned when we wrap the assertion into
        # SAML2 response
        file_path = fileutils.write_to_tempfile(assertion.to_string(
            nspair={'saml': saml2.NAMESPACE,
                    'xmldsig': xmldsig.NAMESPACE}))
        command_list.append(file_path)
        subprocess = environment.subprocess
        stdout = subprocess.check_output(command_list,  # nosec : The contents
                                         # of the command list are coming from
                                         # a trusted source because the
                                         # executable and arguments all either
                                         # come from the config file or are
                                         # hardcoded. The command list is
                                         # initialized earlier in this function
                                         # to a list and it's still a list at
                                         # this point in the function. There is
                                         # no opportunity for an attacker to
                                         # attempt command injection via string
                                         # parsing.
                                         stderr=subprocess.STDOUT)
    except Exception as e:
        msg = _LE('Error when signing assertion, reason: %(reason)s%(output)s')
        LOG.error(msg,
                  {'reason': e,
                   'output': ' ' + e.output if hasattr(e, 'output')
                   else ''})
        raise exception.SAMLSigningError(reason=e)
    finally:
        try:
            if file_path:
                os.remove(file_path)
        except OSError:  # nosec
            # The file is already gone, good.
            pass

    return saml2.create_class_from_xml_string(saml.Assertion, stdout)


class MetadataGenerator(object):
    """A class for generating SAML IdP Metadata."""

    def generate_metadata(self):
        """Generate Identity Provider Metadata.

        Generate and format metadata into XML that can be exposed and
        consumed by a federated Service Provider.

        :returns: XML <EntityDescriptor> object.
        :raises keystone.exception.ValidationError: If the required
            config options aren't set.

        """
        self._ensure_required_values_present()
        entity_descriptor = self._create_entity_descriptor()
        entity_descriptor.idpsso_descriptor = (
            self._create_idp_sso_descriptor())
        return entity_descriptor

    def _create_entity_descriptor(self):
        ed = md.EntityDescriptor()
        ed.entity_id = CONF.saml.idp_entity_id
        return ed

    def _create_idp_sso_descriptor(self):

        def get_cert():
            try:
                return sigver.read_cert_from_file(CONF.saml.certfile, 'pem')
            except (IOError, sigver.CertificateError) as e:
                msg = _('Cannot open certificate %(cert_file)s. '
                        'Reason: %(reason)s')
                msg = msg % {'cert_file': CONF.saml.certfile, 'reason': e}
                LOG.error(msg)
                raise IOError(msg)

        def key_descriptor():
            cert = get_cert()
            return md.KeyDescriptor(
                key_info=xmldsig.KeyInfo(
                    x509_data=xmldsig.X509Data(
                        x509_certificate=xmldsig.X509Certificate(text=cert)
                    )
                ),
                use='signing'
            )

        def single_sign_on_service():
            idp_sso_endpoint = CONF.saml.idp_sso_endpoint
            return md.SingleSignOnService(
                binding=saml2.BINDING_URI,
                location=idp_sso_endpoint)

        def organization():
            name = md.OrganizationName(lang=CONF.saml.idp_lang,
                                       text=CONF.saml.idp_organization_name)
            display_name = md.OrganizationDisplayName(
                lang=CONF.saml.idp_lang,
                text=CONF.saml.idp_organization_display_name)
            url = md.OrganizationURL(lang=CONF.saml.idp_lang,
                                     text=CONF.saml.idp_organization_url)

            return md.Organization(
                organization_display_name=display_name,
                organization_url=url, organization_name=name)

        def contact_person():
            company = md.Company(text=CONF.saml.idp_contact_company)
            given_name = md.GivenName(text=CONF.saml.idp_contact_name)
            surname = md.SurName(text=CONF.saml.idp_contact_surname)
            email = md.EmailAddress(text=CONF.saml.idp_contact_email)
            telephone = md.TelephoneNumber(
                text=CONF.saml.idp_contact_telephone)
            contact_type = CONF.saml.idp_contact_type

            return md.ContactPerson(
                company=company, given_name=given_name, sur_name=surname,
                email_address=email, telephone_number=telephone,
                contact_type=contact_type)

        def name_id_format():
            return md.NameIDFormat(text=saml.NAMEID_FORMAT_TRANSIENT)

        idpsso = md.IDPSSODescriptor()
        idpsso.protocol_support_enumeration = samlp.NAMESPACE
        idpsso.key_descriptor = key_descriptor()
        idpsso.single_sign_on_service = single_sign_on_service()
        idpsso.name_id_format = name_id_format()
        # Organization and contact info are optional; only embed them when
        # all of their config options are set.
        if self._check_organization_values():
            idpsso.organization = organization()
        if self._check_contact_person_values():
            idpsso.contact_person = contact_person()

        return idpsso

    def _ensure_required_values_present(self):
        """Ensure idp_sso_endpoint and idp_entity_id have values."""
        if CONF.saml.idp_entity_id is None:
            msg = _('Ensure configuration option idp_entity_id is set.')
            raise exception.ValidationError(msg)
        if CONF.saml.idp_sso_endpoint is None:
            msg = _('Ensure configuration option idp_sso_endpoint is set.')
            raise exception.ValidationError(msg)

    def _check_contact_person_values(self):
        """Determine if contact information is included in metadata."""
        # Check if we should include contact information
        params = [CONF.saml.idp_contact_company,
                  CONF.saml.idp_contact_name,
                  CONF.saml.idp_contact_surname,
                  CONF.saml.idp_contact_email,
                  CONF.saml.idp_contact_telephone]
        for value in params:
            if value is None:
                return False

        # Check if contact type is an invalid value
        valid_type_values = ['technical', 'other', 'support',
                             'administrative', 'billing']
        if CONF.saml.idp_contact_type not in valid_type_values:
            # Bug fix: the original message opened a bracket that was never
            # closed ("...or billing.").
            msg = _('idp_contact_type must be one of: [technical, other, '
                    'support, administrative or billing].')
            raise exception.ValidationError(msg)
        return True

    def _check_organization_values(self):
        """Determine if organization information is included in metadata."""
        params = [CONF.saml.idp_organization_name,
                  CONF.saml.idp_organization_display_name,
                  CONF.saml.idp_organization_url]
        for value in params:
            if value is None:
                return False
        return True
class ECPGenerator(object):
    """A class for generating an ECP assertion."""

    @staticmethod
    def generate_ecp(saml_assertion, relay_state_prefix):
        # Wrap a signed SAML assertion in a SOAP envelope with an ECP
        # RelayState header, as expected by ECP-capable Service Providers.
        ecp_generator = ECPGenerator()
        header = ecp_generator._create_header(relay_state_prefix)
        body = ecp_generator._create_body(saml_assertion)
        envelope = soapenv.Envelope(header=header, body=body)
        return envelope

    def _create_header(self, relay_state_prefix):
        # RelayState text is the configured prefix plus a random suffix so
        # each generated envelope carries a unique relay state.
        relay_state_text = relay_state_prefix + uuid.uuid4().hex
        relay_state = ecp.RelayState(actor=client_base.ACTOR,
                                     must_understand='1',
                                     text=relay_state_text)
        header = soapenv.Header()
        header.extension_elements = (
            [saml2.element_to_extension_element(relay_state)])
        return header

    def _create_body(self, saml_assertion):
        # The assertion itself travels as an extension element in the SOAP
        # body.
        body = soapenv.Body()
        body.extension_elements = (
            [saml2.element_to_extension_element(saml_assertion)])
        return body
keystone-9.0.0/keystone/federation/controllers.py0000664000567000056710000005022212701407102023416 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Workflow logic for the Federation service."""

import string

from oslo_config import cfg
from oslo_log import log
import six
from six.moves import urllib
import webob

from keystone.auth import controllers as auth_controllers
from keystone.common import authorization
from keystone.common import controller
from keystone.common import dependency
from keystone.common import utils as k_utils
from keystone.common import validation
from keystone.common import wsgi
from keystone import exception
from keystone.federation import idp as keystone_idp
from keystone.federation import schema
from keystone.federation import utils
from keystone.i18n import _
from keystone.models import token_model


CONF = cfg.CONF
LOG = log.getLogger(__name__)


class _ControllerBase(controller.V3Controller):
    """Base behaviors for federation controllers."""

    @classmethod
    def base_url(cls, context, path=None):
        """Construct a path and pass it to V3Controller.base_url method."""
        path = '/OS-FEDERATION/' + cls.collection_name
        return super(_ControllerBase, cls).base_url(context, path=path)


@dependency.requires('federation_api')
class IdentityProvider(_ControllerBase):
    """Identity Provider representation."""

    collection_name = 'identity_providers'
    member_name = 'identity_provider'

    _public_parameters = frozenset(['id', 'enabled', 'description',
                                    'remote_ids', 'links'
                                    ])

    @classmethod
    def _add_related_links(cls, context, ref):
        """Add URLs for entities related with Identity Provider.

        Add URLs pointing to:
        - protocols tied to the Identity Provider

        """
        ref.setdefault('links', {})
        base_path = ref['links'].get('self')
        if base_path is None:
            base_path = '/'.join([IdentityProvider.base_url(context),
                                  ref['id']])

        for name in ['protocols']:
            ref['links'][name] = '/'.join([base_path, name])

    @classmethod
    def _add_self_referential_link(cls, context, ref):
        id = ref['id']
        self_path = '/'.join([cls.base_url(context), id])
        ref.setdefault('links', {})
        ref['links']['self'] = self_path

    @classmethod
    def wrap_member(cls, context, ref):
        cls._add_self_referential_link(context, ref)
        cls._add_related_links(context, ref)
        # filter_params strips everything not in _public_parameters.
        ref = cls.filter_params(ref)
        return {cls.member_name: ref}

    @controller.protected()
    @validation.validated(schema.identity_provider_create,
                          'identity_provider')
    def create_identity_provider(self, context, idp_id, identity_provider):
        identity_provider = self._normalize_dict(identity_provider)
        identity_provider.setdefault('enabled', False)
        idp_ref = self.federation_api.create_idp(idp_id, identity_provider)
        response = IdentityProvider.wrap_member(context, idp_ref)
        return wsgi.render_response(body=response, status=('201', 'Created'))

    @controller.filterprotected('id', 'enabled')
    def list_identity_providers(self, context, filters):
        hints = self.build_driver_hints(context, filters)
        ref = self.federation_api.list_idps(hints=hints)
        ref = [self.filter_params(x) for x in ref]
        return IdentityProvider.wrap_collection(context, ref, hints=hints)

    @controller.protected()
    def get_identity_provider(self, context, idp_id):
        ref = self.federation_api.get_idp(idp_id)
        return IdentityProvider.wrap_member(context, ref)

    @controller.protected()
    def delete_identity_provider(self, context, idp_id):
        self.federation_api.delete_idp(idp_id)

    @controller.protected()
    @validation.validated(schema.identity_provider_update,
                          'identity_provider')
    def update_identity_provider(self, context, idp_id, identity_provider):
        identity_provider = self._normalize_dict(identity_provider)
        idp_ref = self.federation_api.update_idp(idp_id, identity_provider)
        return IdentityProvider.wrap_member(context, idp_ref)


@dependency.requires('federation_api')
class FederationProtocol(_ControllerBase):
    """A federation protocol representation.

    See keystone.common.controller.V3Controller docstring for explanation on
    _public_parameters class attributes.

    """

    collection_name = 'protocols'
    member_name = 'protocol'

    _public_parameters = frozenset(['id', 'mapping_id', 'links'])

    @classmethod
    def _add_self_referential_link(cls, context, ref):
        """Add 'links' entry to the response dictionary.

        Calls IdentityProvider.base_url() class method, as it constructs
        proper URL along with the 'identity providers' part included.

        :param ref: response dictionary

        """
        ref.setdefault('links', {})
        base_path = ref['links'].get('identity_provider')
        if base_path is None:
            base_path = [IdentityProvider.base_url(context), ref['idp_id']]
            base_path = '/'.join(base_path)
        self_path = [base_path, 'protocols', ref['id']]
        self_path = '/'.join(self_path)
        ref['links']['self'] = self_path

    @classmethod
    def _add_related_links(cls, context, ref):
        """Add new entries to the 'links' subdictionary in the response.

        Adds 'identity_provider' key with URL pointing to related identity
        provider as a value.

        :param ref: response dictionary

        """
        ref.setdefault('links', {})
        base_path = '/'.join([IdentityProvider.base_url(context),
                              ref['idp_id']])
        ref['links']['identity_provider'] = base_path

    @classmethod
    def wrap_member(cls, context, ref):
        cls._add_related_links(context, ref)
        cls._add_self_referential_link(context, ref)
        ref = cls.filter_params(ref)
        return {cls.member_name: ref}

    @controller.protected()
    @validation.validated(schema.federation_protocol_schema, 'protocol')
    def create_protocol(self, context, idp_id, protocol_id, protocol):
        ref = self._normalize_dict(protocol)
        ref = self.federation_api.create_protocol(idp_id, protocol_id, ref)
        response = FederationProtocol.wrap_member(context, ref)
        return wsgi.render_response(body=response, status=('201', 'Created'))

    @controller.protected()
    @validation.validated(schema.federation_protocol_schema, 'protocol')
    def update_protocol(self, context, idp_id, protocol_id, protocol):
        ref = self._normalize_dict(protocol)
        ref = self.federation_api.update_protocol(idp_id, protocol_id,
                                                  protocol)
        return FederationProtocol.wrap_member(context, ref)

    @controller.protected()
    def get_protocol(self, context, idp_id, protocol_id):
        ref = self.federation_api.get_protocol(idp_id, protocol_id)
        return FederationProtocol.wrap_member(context, ref)

    @controller.protected()
    def list_protocols(self, context, idp_id):
        protocols_ref = self.federation_api.list_protocols(idp_id)
        protocols = list(protocols_ref)
        return FederationProtocol.wrap_collection(context, protocols)

    @controller.protected()
    def delete_protocol(self, context, idp_id, protocol_id):
        self.federation_api.delete_protocol(idp_id, protocol_id)


@dependency.requires('federation_api')
class MappingController(_ControllerBase):
    collection_name = 'mappings'
    member_name = 'mapping'

    @controller.protected()
    def create_mapping(self, context, mapping_id, mapping):
        ref = self._normalize_dict(mapping)
        # Mappings carry their own structural validation (rules/remote
        # attributes) performed here rather than via JSON schema.
        utils.validate_mapping_structure(ref)
        mapping_ref = self.federation_api.create_mapping(mapping_id, ref)
        response = MappingController.wrap_member(context, mapping_ref)
        return wsgi.render_response(body=response, status=('201', 'Created'))

    @controller.protected()
    def list_mappings(self, context):
        ref = self.federation_api.list_mappings()
        return MappingController.wrap_collection(context, ref)

    @controller.protected()
    def get_mapping(self, context, mapping_id):
        ref = self.federation_api.get_mapping(mapping_id)
        return MappingController.wrap_member(context, ref)

    @controller.protected()
    def delete_mapping(self, context, mapping_id):
        self.federation_api.delete_mapping(mapping_id)

    @controller.protected()
    def update_mapping(self, context, mapping_id, mapping):
        mapping = self._normalize_dict(mapping)
        utils.validate_mapping_structure(mapping)
        mapping_ref = self.federation_api.update_mapping(mapping_id, mapping)
        return MappingController.wrap_member(context, mapping_ref)


@dependency.requires('federation_api')
class Auth(auth_controllers.Auth):

    def _get_sso_origin_host(self, context):
        """Validate and return originating dashboard URL.

        Make sure the parameter is specified in the request's URL as well
        its value belongs to a list of trusted dashboards.

        :param context: request's context
        :raises keystone.exception.ValidationError: ``origin`` query
                parameter was not specified. The URL is deemed invalid.
        :raises keystone.exception.Unauthorized: URL specified in origin
                query parameter does not exist in list of websso trusted
                dashboards.
        :returns: URL with the originating dashboard

        """
        if 'origin' in context['query_string']:
            origin = context['query_string']['origin']
            host = urllib.parse.unquote_plus(origin)
        else:
            msg = _('Request must have an origin query parameter')
            LOG.error(msg)
            raise exception.ValidationError(msg)

        # change trusted_dashboard hostnames to lowercase before comparison
        trusted_dashboards = [k_utils.lower_case_hostname(trusted)
                              for trusted in
                              CONF.federation.trusted_dashboard]

        if host not in trusted_dashboards:
            msg = _('%(host)s is not a trusted dashboard host')
            msg = msg % {'host': host}
            LOG.error(msg)
            raise exception.Unauthorized(msg)

        return host

    def federated_authentication(self, context, idp_id, protocol_id):
        """Authenticate from dedicated url endpoint.

        Build HTTP request body for federated authentication and inject
        it into the ``authenticate_for_token`` function.

        """
        auth = {
            'identity': {
                'methods': [protocol_id],
                protocol_id: {
                    'identity_provider': idp_id,
                    'protocol': protocol_id
                }
            }
        }

        return self.authenticate_for_token(context, auth=auth)

    def federated_sso_auth(self, context, protocol_id):
        # The IdP is discovered from the remote-id attribute the external
        # authentication middleware placed in the WSGI environment.
        try:
            remote_id_name = utils.get_remote_id_parameter(protocol_id)
            remote_id = context['environment'][remote_id_name]
        except KeyError:
            msg = _('Missing entity ID from environment')
            LOG.error(msg)
            raise exception.Unauthorized(msg)

        host = self._get_sso_origin_host(context)

        ref = self.federation_api.get_idp_from_remote_id(remote_id)
        # NOTE(stevemar): the returned object is a simple dict that
        # contains the idp_id and remote_id.
        identity_provider = ref['idp_id']
        res = self.federated_authentication(context, identity_provider,
                                            protocol_id)
        token_id = res.headers['X-Subject-Token']
        return self.render_html_response(host, token_id)

    def federated_idp_specific_sso_auth(self, context, idp_id, protocol_id):
        host = self._get_sso_origin_host(context)

        # NOTE(lbragstad): We validate that the Identity Provider actually
        # exists in the Mapped authentication plugin.
        res = self.federated_authentication(context, idp_id, protocol_id)
        token_id = res.headers['X-Subject-Token']
        return self.render_html_response(host, token_id)

    def render_html_response(self, host, token_id):
        """Forms an HTML Form from a template with autosubmit."""
        headers = [('Content-Type', 'text/html')]

        with open(CONF.federation.sso_callback_template) as template:
            src = string.Template(template.read())

        subs = {'host': host, 'token': token_id}
        body = src.substitute(subs)
        return webob.Response(body=body, status='200',
                              headerlist=headers)

    def _create_base_saml_assertion(self, context, auth):
        issuer = CONF.saml.idp_entity_id
        sp_id = auth['scope']['service_provider']['id']
        service_provider = self.federation_api.get_sp(sp_id)
        utils.assert_enabled_service_provider_object(service_provider)
        sp_url = service_provider['sp_url']

        token_id = auth['identity']['token']['id']
        token_data = self.token_provider_api.validate_token(token_id)
        token_ref = token_model.KeystoneToken(token_id, token_data)

        # SAML assertions can only be generated from project-scoped tokens.
        if not token_ref.project_scoped:
            action = _('Use a project scoped token when attempting to create '
                       'a SAML assertion')
            raise exception.ForbiddenAction(action=action)

        subject = token_ref.user_name
        roles = token_ref.role_names
        project = token_ref.project_name
        # NOTE(rodrigods): the domain name is necessary in order to distinguish
        # between projects and users with the same name in different domains.
        project_domain_name = token_ref.project_domain_name
        subject_domain_name = token_ref.user_domain_name

        generator = keystone_idp.SAMLGenerator()
        response = generator.samlize_token(
            issuer, sp_url, subject, subject_domain_name,
            roles, project, project_domain_name)
        return (response, service_provider)

    def _build_response_headers(self, service_provider):
        # NOTE(review): six.binary_type('...') on a text URL only works on
        # Python 2 (where binary_type is str); under Python 3 this would
        # raise TypeError without an encoding — confirm intended runtime.
        return [('Content-Type', 'text/xml'),
                ('X-sp-url', six.binary_type(service_provider['sp_url'])),
                ('X-auth-url', six.binary_type(service_provider['auth_url']))]

    @validation.validated(schema.saml_create, 'auth')
    def create_saml_assertion(self, context, auth):
        """Exchange a scoped token for a SAML assertion.

        :param auth: Dictionary that contains a token and service provider ID
        :returns: SAML Assertion based on properties from the token

        """
        t = self._create_base_saml_assertion(context, auth)
        (response, service_provider) = t

        headers = self._build_response_headers(service_provider)
        return wsgi.render_response(body=response.to_string(),
                                    status=('200', 'OK'),
                                    headers=headers)

    @validation.validated(schema.saml_create, 'auth')
    def create_ecp_assertion(self, context, auth):
        """Exchange a scoped token for an ECP assertion.

        :param auth: Dictionary that contains a token and service provider ID
        :returns: ECP Assertion based on properties from the token

        """
        t = self._create_base_saml_assertion(context, auth)
        (saml_assertion, service_provider) = t
        relay_state_prefix = service_provider['relay_state_prefix']

        generator = keystone_idp.ECPGenerator()
        ecp_assertion = generator.generate_ecp(saml_assertion,
                                               relay_state_prefix)

        headers = self._build_response_headers(service_provider)
        return wsgi.render_response(body=ecp_assertion.to_string(),
                                    status=('200', 'OK'),
                                    headers=headers)


@dependency.requires('assignment_api', 'resource_api')
class DomainV3(controller.V3Controller):
    collection_name = 'domains'
    member_name = 'domain'

    def __init__(self):
        super(DomainV3, self).__init__()
        self.get_member_from_driver = self.resource_api.get_domain

    @controller.protected()
    def list_domains_for_groups(self, context):
        """List all domains available to an authenticated user's groups.

        :param context: request context
        :returns: list of accessible domains

        """
        auth_context = context['environment'][
            authorization.AUTH_CONTEXT_ENV]
        domains = self.assignment_api.list_domains_for_groups(
            auth_context['group_ids'])
        return DomainV3.wrap_collection(context, domains)


@dependency.requires('assignment_api', 'resource_api')
class ProjectAssignmentV3(controller.V3Controller):
    collection_name = 'projects'
    member_name = 'project'

    def __init__(self):
        super(ProjectAssignmentV3, self).__init__()
        self.get_member_from_driver = self.resource_api.get_project

    @controller.protected()
    def list_projects_for_groups(self, context):
        """List all projects available to an authenticated user's groups.

        :param context: request context
        :returns: list of accessible projects

        """
        auth_context = context['environment'][
            authorization.AUTH_CONTEXT_ENV]
        projects = self.assignment_api.list_projects_for_groups(
            auth_context['group_ids'])
        return ProjectAssignmentV3.wrap_collection(context, projects)


@dependency.requires('federation_api')
class ServiceProvider(_ControllerBase):
    """Service Provider representation."""

    collection_name = 'service_providers'
    member_name = 'service_provider'

    _public_parameters = frozenset(['auth_url', 'id', 'enabled',
                                    'description', 'links',
                                    'relay_state_prefix', 'sp_url'])

    @controller.protected()
    @validation.validated(schema.service_provider_create, 'service_provider')
    def create_service_provider(self, context, sp_id, service_provider):
        service_provider = self._normalize_dict(service_provider)
        service_provider.setdefault('enabled', False)
        service_provider.setdefault('relay_state_prefix',
                                    CONF.saml.relay_state_prefix)
        sp_ref = self.federation_api.create_sp(sp_id, service_provider)
        response = ServiceProvider.wrap_member(context, sp_ref)
        return wsgi.render_response(body=response, status=('201', 'Created'))

    @controller.filterprotected('id', 'enabled')
    def list_service_providers(self, context, filters):
        hints = self.build_driver_hints(context, filters)
        ref = self.federation_api.list_sps(hints=hints)
        ref = [self.filter_params(x) for x in ref]
        return ServiceProvider.wrap_collection(context, ref, hints=hints)

    @controller.protected()
    def get_service_provider(self, context, sp_id):
        ref = self.federation_api.get_sp(sp_id)
        return ServiceProvider.wrap_member(context, ref)

    @controller.protected()
    def delete_service_provider(self, context, sp_id):
        self.federation_api.delete_sp(sp_id)

    @controller.protected()
    @validation.validated(schema.service_provider_update, 'service_provider')
    def update_service_provider(self, context, sp_id, service_provider):
        service_provider = self._normalize_dict(service_provider)
        sp_ref = self.federation_api.update_sp(sp_id,
service_provider) return ServiceProvider.wrap_member(context, sp_ref) class SAMLMetadataV3(_ControllerBase): member_name = 'metadata' def get_metadata(self, context): metadata_path = CONF.saml.idp_metadata_path try: with open(metadata_path, 'r') as metadata_handler: metadata = metadata_handler.read() except IOError as e: # Raise HTTP 500 in case Metadata file cannot be read. raise exception.MetadataFileError(reason=e) return wsgi.render_response(body=metadata, status=('200', 'OK'), headers=[('Content-Type', 'text/xml')]) keystone-9.0.0/keystone/federation/routers.py0000664000567000056710000002372612701407102022564 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from keystone.common import json_home from keystone.common import wsgi from keystone.federation import controllers build_resource_relation = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-FEDERATION', extension_version='1.0') build_parameter_relation = functools.partial( json_home.build_v3_extension_parameter_relation, extension_name='OS-FEDERATION', extension_version='1.0') IDP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='idp_id') PROTOCOL_ID_PARAMETER_RELATION = build_parameter_relation( parameter_name='protocol_id') SP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='sp_id') class Routers(wsgi.RoutersBase): """API Endpoints for the Federation extension. 
The API looks like:: PUT /OS-FEDERATION/identity_providers/{idp_id} GET /OS-FEDERATION/identity_providers GET /OS-FEDERATION/identity_providers/{idp_id} DELETE /OS-FEDERATION/identity_providers/{idp_id} PATCH /OS-FEDERATION/identity_providers/{idp_id} PUT /OS-FEDERATION/identity_providers/ {idp_id}/protocols/{protocol_id} GET /OS-FEDERATION/identity_providers/ {idp_id}/protocols GET /OS-FEDERATION/identity_providers/ {idp_id}/protocols/{protocol_id} PATCH /OS-FEDERATION/identity_providers/ {idp_id}/protocols/{protocol_id} DELETE /OS-FEDERATION/identity_providers/ {idp_id}/protocols/{protocol_id} PUT /OS-FEDERATION/mappings GET /OS-FEDERATION/mappings PATCH /OS-FEDERATION/mappings/{mapping_id} GET /OS-FEDERATION/mappings/{mapping_id} DELETE /OS-FEDERATION/mappings/{mapping_id} GET /OS-FEDERATION/projects GET /OS-FEDERATION/domains PUT /OS-FEDERATION/service_providers/{sp_id} GET /OS-FEDERATION/service_providers GET /OS-FEDERATION/service_providers/{sp_id} DELETE /OS-FEDERATION/service_providers/{sp_id} PATCH /OS-FEDERATION/service_providers/{sp_id} GET /OS-FEDERATION/identity_providers/{idp_id}/ protocols/{protocol_id}/auth POST /OS-FEDERATION/identity_providers/{idp_id}/ protocols/{protocol_id}/auth GET /auth/OS-FEDERATION/identity_providers/ {idp_id}/protocols/{protocol_id}/websso ?origin=https%3A//horizon.example.com POST /auth/OS-FEDERATION/identity_providers/ {idp_id}/protocols/{protocol_id}/websso ?origin=https%3A//horizon.example.com POST /auth/OS-FEDERATION/saml2 POST /auth/OS-FEDERATION/saml2/ecp GET /OS-FEDERATION/saml2/metadata GET /auth/OS-FEDERATION/websso/{protocol_id} ?origin=https%3A//horizon.example.com POST /auth/OS-FEDERATION/websso/{protocol_id} ?origin=https%3A//horizon.example.com """ def _construct_url(self, suffix): return "/OS-FEDERATION/%s" % suffix def append_v3_routers(self, mapper, routers): auth_controller = controllers.Auth() idp_controller = controllers.IdentityProvider() protocol_controller = controllers.FederationProtocol() 
mapping_controller = controllers.MappingController() project_controller = controllers.ProjectAssignmentV3() domain_controller = controllers.DomainV3() saml_metadata_controller = controllers.SAMLMetadataV3() sp_controller = controllers.ServiceProvider() # Identity Provider CRUD operations self._add_resource( mapper, idp_controller, path=self._construct_url('identity_providers/{idp_id}'), get_action='get_identity_provider', put_action='create_identity_provider', patch_action='update_identity_provider', delete_action='delete_identity_provider', rel=build_resource_relation(resource_name='identity_provider'), path_vars={ 'idp_id': IDP_ID_PARAMETER_RELATION, }) self._add_resource( mapper, idp_controller, path=self._construct_url('identity_providers'), get_action='list_identity_providers', rel=build_resource_relation(resource_name='identity_providers')) # Protocol CRUD operations self._add_resource( mapper, protocol_controller, path=self._construct_url('identity_providers/{idp_id}/protocols/' '{protocol_id}'), get_action='get_protocol', put_action='create_protocol', patch_action='update_protocol', delete_action='delete_protocol', rel=build_resource_relation( resource_name='identity_provider_protocol'), path_vars={ 'idp_id': IDP_ID_PARAMETER_RELATION, 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION, }) self._add_resource( mapper, protocol_controller, path=self._construct_url('identity_providers/{idp_id}/protocols'), get_action='list_protocols', rel=build_resource_relation( resource_name='identity_provider_protocols'), path_vars={ 'idp_id': IDP_ID_PARAMETER_RELATION, }) # Mapping CRUD operations self._add_resource( mapper, mapping_controller, path=self._construct_url('mappings/{mapping_id}'), get_action='get_mapping', put_action='create_mapping', patch_action='update_mapping', delete_action='delete_mapping', rel=build_resource_relation(resource_name='mapping'), path_vars={ 'mapping_id': build_parameter_relation( parameter_name='mapping_id'), }) self._add_resource( mapper, 
mapping_controller, path=self._construct_url('mappings'), get_action='list_mappings', rel=build_resource_relation(resource_name='mappings')) # Service Providers CRUD operations self._add_resource( mapper, sp_controller, path=self._construct_url('service_providers/{sp_id}'), get_action='get_service_provider', put_action='create_service_provider', patch_action='update_service_provider', delete_action='delete_service_provider', rel=build_resource_relation(resource_name='service_provider'), path_vars={ 'sp_id': SP_ID_PARAMETER_RELATION, }) self._add_resource( mapper, sp_controller, path=self._construct_url('service_providers'), get_action='list_service_providers', rel=build_resource_relation(resource_name='service_providers')) self._add_resource( mapper, domain_controller, path=self._construct_url('domains'), new_path='/auth/domains', get_action='list_domains_for_groups', rel=build_resource_relation(resource_name='domains')) self._add_resource( mapper, project_controller, path=self._construct_url('projects'), new_path='/auth/projects', get_action='list_projects_for_groups', rel=build_resource_relation(resource_name='projects')) # Auth operations self._add_resource( mapper, auth_controller, path=self._construct_url('identity_providers/{idp_id}/' 'protocols/{protocol_id}/auth'), get_post_action='federated_authentication', rel=build_resource_relation( resource_name='identity_provider_protocol_auth'), path_vars={ 'idp_id': IDP_ID_PARAMETER_RELATION, 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION, }) self._add_resource( mapper, auth_controller, path='/auth' + self._construct_url('saml2'), post_action='create_saml_assertion', rel=build_resource_relation(resource_name='saml2')) self._add_resource( mapper, auth_controller, path='/auth' + self._construct_url('saml2/ecp'), post_action='create_ecp_assertion', rel=build_resource_relation(resource_name='ecp')) self._add_resource( mapper, auth_controller, path='/auth' + self._construct_url('websso/{protocol_id}'), 
get_post_action='federated_sso_auth', rel=build_resource_relation(resource_name='websso'), path_vars={ 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION, }) self._add_resource( mapper, auth_controller, path='/auth' + self._construct_url( 'identity_providers/{idp_id}/protocols/{protocol_id}/websso'), get_post_action='federated_idp_specific_sso_auth', rel=build_resource_relation(resource_name='identity_providers'), path_vars={ 'idp_id': IDP_ID_PARAMETER_RELATION, 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION, }) # Keystone-Identity-Provider metadata endpoint self._add_resource( mapper, saml_metadata_controller, path=self._construct_url('saml2/metadata'), get_action='get_metadata', rel=build_resource_relation(resource_name='metadata')) keystone-9.0.0/keystone/federation/V8_backends/0000775000567000056710000000000012701407246022635 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/federation/V8_backends/__init__.py0000664000567000056710000000000012701407102024723 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/federation/V8_backends/sql.py0000664000567000056710000003337112701407102024004 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils from sqlalchemy import orm from keystone.common import sql from keystone import exception from keystone.federation import core class FederationProtocolModel(sql.ModelBase, sql.DictBase): __tablename__ = 'federation_protocol' attributes = ['id', 'idp_id', 'mapping_id'] mutable_attributes = frozenset(['mapping_id']) id = sql.Column(sql.String(64), primary_key=True) idp_id = sql.Column(sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE'), primary_key=True) mapping_id = sql.Column(sql.String(64), nullable=False) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class IdentityProviderModel(sql.ModelBase, sql.DictBase): __tablename__ = 'identity_provider' attributes = ['id', 'enabled', 'description', 'remote_ids'] mutable_attributes = frozenset(['description', 'enabled', 'remote_ids']) id = sql.Column(sql.String(64), primary_key=True) enabled = sql.Column(sql.Boolean, nullable=False) description = sql.Column(sql.Text(), nullable=True) remote_ids = orm.relationship('IdPRemoteIdsModel', order_by='IdPRemoteIdsModel.remote_id', cascade='all, delete-orphan') @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() remote_ids_list = new_dictionary.pop('remote_ids', None) if not remote_ids_list: remote_ids_list = [] identity_provider = cls(**new_dictionary) remote_ids = [] # NOTE(fmarco76): the remote_ids_list contains only remote ids # associated with the IdP because of the "relationship" established in # sqlalchemy and corresponding to the FK in the idp_remote_ids table for remote in remote_ids_list: remote_ids.append(IdPRemoteIdsModel(remote_id=remote)) identity_provider.remote_ids = remote_ids return identity_provider def to_dict(self): """Return a dictionary with model's 
attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) d['remote_ids'] = [] for remote in self.remote_ids: d['remote_ids'].append(remote.remote_id) return d class IdPRemoteIdsModel(sql.ModelBase, sql.DictBase): __tablename__ = 'idp_remote_ids' attributes = ['idp_id', 'remote_id'] mutable_attributes = frozenset(['idp_id', 'remote_id']) idp_id = sql.Column(sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE')) remote_id = sql.Column(sql.String(255), primary_key=True) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class MappingModel(sql.ModelBase, sql.DictBase): __tablename__ = 'mapping' attributes = ['id', 'rules'] id = sql.Column(sql.String(64), primary_key=True) rules = sql.Column(sql.JsonBlob(), nullable=False) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() new_dictionary['rules'] = jsonutils.dumps(new_dictionary['rules']) return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) d['rules'] = jsonutils.loads(d['rules']) return d class ServiceProviderModel(sql.ModelBase, sql.DictBase): __tablename__ = 'service_provider' attributes = ['auth_url', 'id', 'enabled', 'description', 'relay_state_prefix', 'sp_url'] mutable_attributes = frozenset(['auth_url', 'description', 'enabled', 'relay_state_prefix', 'sp_url']) id = sql.Column(sql.String(64), primary_key=True) enabled = sql.Column(sql.Boolean, nullable=False) description = sql.Column(sql.Text(), nullable=True) auth_url = sql.Column(sql.String(256), nullable=False) sp_url = sql.Column(sql.String(256), nullable=False) relay_state_prefix = sql.Column(sql.String(256), nullable=False) 
@classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class Federation(core.FederationDriverV8): # Identity Provider CRUD @sql.handle_conflicts(conflict_type='identity_provider') def create_idp(self, idp_id, idp): idp['id'] = idp_id with sql.session_for_write() as session: idp_ref = IdentityProviderModel.from_dict(idp) session.add(idp_ref) return idp_ref.to_dict() def delete_idp(self, idp_id): with sql.session_for_write() as session: self._delete_assigned_protocols(session, idp_id) idp_ref = self._get_idp(session, idp_id) session.delete(idp_ref) def _get_idp(self, session, idp_id): idp_ref = session.query(IdentityProviderModel).get(idp_id) if not idp_ref: raise exception.IdentityProviderNotFound(idp_id=idp_id) return idp_ref def _get_idp_from_remote_id(self, session, remote_id): q = session.query(IdPRemoteIdsModel) q = q.filter_by(remote_id=remote_id) try: return q.one() except sql.NotFound: raise exception.IdentityProviderNotFound(idp_id=remote_id) def list_idps(self): with sql.session_for_read() as session: idps = session.query(IdentityProviderModel) idps_list = [idp.to_dict() for idp in idps] return idps_list def get_idp(self, idp_id): with sql.session_for_read() as session: idp_ref = self._get_idp(session, idp_id) return idp_ref.to_dict() def get_idp_from_remote_id(self, remote_id): with sql.session_for_read() as session: ref = self._get_idp_from_remote_id(session, remote_id) return ref.to_dict() def update_idp(self, idp_id, idp): with sql.session_for_write() as session: idp_ref = self._get_idp(session, idp_id) old_idp = idp_ref.to_dict() old_idp.update(idp) new_idp = IdentityProviderModel.from_dict(old_idp) for attr in IdentityProviderModel.mutable_attributes: setattr(idp_ref, attr, getattr(new_idp, attr)) return idp_ref.to_dict() # Protocol CRUD def 
_get_protocol(self, session, idp_id, protocol_id): q = session.query(FederationProtocolModel) q = q.filter_by(id=protocol_id, idp_id=idp_id) try: return q.one() except sql.NotFound: kwargs = {'protocol_id': protocol_id, 'idp_id': idp_id} raise exception.FederatedProtocolNotFound(**kwargs) @sql.handle_conflicts(conflict_type='federation_protocol') def create_protocol(self, idp_id, protocol_id, protocol): protocol['id'] = protocol_id protocol['idp_id'] = idp_id with sql.session_for_write() as session: self._get_idp(session, idp_id) protocol_ref = FederationProtocolModel.from_dict(protocol) session.add(protocol_ref) return protocol_ref.to_dict() def update_protocol(self, idp_id, protocol_id, protocol): with sql.session_for_write() as session: proto_ref = self._get_protocol(session, idp_id, protocol_id) old_proto = proto_ref.to_dict() old_proto.update(protocol) new_proto = FederationProtocolModel.from_dict(old_proto) for attr in FederationProtocolModel.mutable_attributes: setattr(proto_ref, attr, getattr(new_proto, attr)) return proto_ref.to_dict() def get_protocol(self, idp_id, protocol_id): with sql.session_for_read() as session: protocol_ref = self._get_protocol(session, idp_id, protocol_id) return protocol_ref.to_dict() def list_protocols(self, idp_id): with sql.session_for_read() as session: q = session.query(FederationProtocolModel) q = q.filter_by(idp_id=idp_id) protocols = [protocol.to_dict() for protocol in q] return protocols def delete_protocol(self, idp_id, protocol_id): with sql.session_for_write() as session: key_ref = self._get_protocol(session, idp_id, protocol_id) session.delete(key_ref) def _delete_assigned_protocols(self, session, idp_id): query = session.query(FederationProtocolModel) query = query.filter_by(idp_id=idp_id) query.delete() # Mapping CRUD def _get_mapping(self, session, mapping_id): mapping_ref = session.query(MappingModel).get(mapping_id) if not mapping_ref: raise exception.MappingNotFound(mapping_id=mapping_id) return mapping_ref 
@sql.handle_conflicts(conflict_type='mapping') def create_mapping(self, mapping_id, mapping): ref = {} ref['id'] = mapping_id ref['rules'] = mapping.get('rules') with sql.session_for_write() as session: mapping_ref = MappingModel.from_dict(ref) session.add(mapping_ref) return mapping_ref.to_dict() def delete_mapping(self, mapping_id): with sql.session_for_write() as session: mapping_ref = self._get_mapping(session, mapping_id) session.delete(mapping_ref) def list_mappings(self): with sql.session_for_read() as session: mappings = session.query(MappingModel) return [x.to_dict() for x in mappings] def get_mapping(self, mapping_id): with sql.session_for_read() as session: mapping_ref = self._get_mapping(session, mapping_id) return mapping_ref.to_dict() @sql.handle_conflicts(conflict_type='mapping') def update_mapping(self, mapping_id, mapping): ref = {} ref['id'] = mapping_id ref['rules'] = mapping.get('rules') with sql.session_for_write() as session: mapping_ref = self._get_mapping(session, mapping_id) old_mapping = mapping_ref.to_dict() old_mapping.update(ref) new_mapping = MappingModel.from_dict(old_mapping) for attr in MappingModel.attributes: setattr(mapping_ref, attr, getattr(new_mapping, attr)) return mapping_ref.to_dict() def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id): with sql.session_for_read() as session: protocol_ref = self._get_protocol(session, idp_id, protocol_id) mapping_id = protocol_ref.mapping_id mapping_ref = self._get_mapping(session, mapping_id) return mapping_ref.to_dict() # Service Provider CRUD @sql.handle_conflicts(conflict_type='service_provider') def create_sp(self, sp_id, sp): sp['id'] = sp_id with sql.session_for_write() as session: sp_ref = ServiceProviderModel.from_dict(sp) session.add(sp_ref) return sp_ref.to_dict() def delete_sp(self, sp_id): with sql.session_for_write() as session: sp_ref = self._get_sp(session, sp_id) session.delete(sp_ref) def _get_sp(self, session, sp_id): sp_ref = 
session.query(ServiceProviderModel).get(sp_id) if not sp_ref: raise exception.ServiceProviderNotFound(sp_id=sp_id) return sp_ref def list_sps(self): with sql.session_for_read() as session: sps = session.query(ServiceProviderModel) sps_list = [sp.to_dict() for sp in sps] return sps_list def get_sp(self, sp_id): with sql.session_for_read() as session: sp_ref = self._get_sp(session, sp_id) return sp_ref.to_dict() def update_sp(self, sp_id, sp): with sql.session_for_write() as session: sp_ref = self._get_sp(session, sp_id) old_sp = sp_ref.to_dict() old_sp.update(sp) new_sp = ServiceProviderModel.from_dict(old_sp) for attr in ServiceProviderModel.mutable_attributes: setattr(sp_ref, attr, getattr(new_sp, attr)) return sp_ref.to_dict() def get_enabled_service_providers(self): with sql.session_for_read() as session: service_providers = session.query(ServiceProviderModel) service_providers = service_providers.filter_by(enabled=True) return service_providers keystone-9.0.0/keystone/models/0000775000567000056710000000000012701407246017651 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/models/__init__.py0000664000567000056710000000000012701407102021737 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/models/revoke_model.py0000664000567000056710000003157012701407102022673 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import timeutils from six.moves import map from keystone.common import utils # The set of attributes common between the RevokeEvent # and the dictionaries created from the token Data. _NAMES = ['trust_id', 'consumer_id', 'access_token_id', 'audit_id', 'audit_chain_id', 'expires_at', 'domain_id', 'project_id', 'user_id', 'role_id'] # Additional arguments for creating a RevokeEvent _EVENT_ARGS = ['issued_before', 'revoked_at'] # Names of attributes in the RevocationEvent, including "virtual" attributes. # Virtual attributes are those added based on other values. _EVENT_NAMES = _NAMES + ['domain_scope_id'] # Values that will be in the token data but not in the event. # These will compared with event values that have different names. # For example: both trustor_id and trustee_id are compared against user_id _TOKEN_KEYS = ['identity_domain_id', 'assignment_domain_id', 'issued_at', 'trustor_id', 'trustee_id'] # Alternative names to be checked in token for every field in # revoke tree. ALTERNATIVES = { 'user_id': ['user_id', 'trustor_id', 'trustee_id'], 'domain_id': ['identity_domain_id', 'assignment_domain_id'], # For a domain-scoped token, the domain is in assignment_domain_id. 'domain_scope_id': ['assignment_domain_id', ], } REVOKE_KEYS = _NAMES + _EVENT_ARGS def blank_token_data(issued_at): token_data = dict() for name in _NAMES: token_data[name] = None for name in _TOKEN_KEYS: token_data[name] = None # required field token_data['issued_at'] = issued_at return token_data class RevokeEvent(object): def __init__(self, **kwargs): for k in REVOKE_KEYS: v = kwargs.get(k) setattr(self, k, v) if self.domain_id and self.expires_at: # This is revoking a domain-scoped token. self.domain_scope_id = self.domain_id self.domain_id = None else: # This is revoking all tokens for a domain. self.domain_scope_id = None if self.expires_at is not None: # Trim off the expiration time because MySQL timestamps are only # accurate to the second. 
self.expires_at = self.expires_at.replace(microsecond=0) if self.revoked_at is None: self.revoked_at = timeutils.utcnow() if self.issued_before is None: self.issued_before = self.revoked_at def to_dict(self): keys = ['user_id', 'role_id', 'domain_id', 'domain_scope_id', 'project_id', 'audit_id', 'audit_chain_id', ] event = {key: self.__dict__[key] for key in keys if self.__dict__[key] is not None} if self.trust_id is not None: event['OS-TRUST:trust_id'] = self.trust_id if self.consumer_id is not None: event['OS-OAUTH1:consumer_id'] = self.consumer_id if self.consumer_id is not None: event['OS-OAUTH1:access_token_id'] = self.access_token_id if self.expires_at is not None: event['expires_at'] = utils.isotime(self.expires_at) if self.issued_before is not None: event['issued_before'] = utils.isotime(self.issued_before, subsecond=True) return event def key_for_name(self, name): return "%s=%s" % (name, getattr(self, name) or '*') def attr_keys(event): return list(map(event.key_for_name, _EVENT_NAMES)) class RevokeTree(object): """Fast Revocation Checking Tree Structure The Tree is an index to quickly match tokens against events. Each node is a hashtable of key=value combinations from revocation events. The """ def __init__(self, revoke_events=None): self.revoke_map = dict() self.add_events(revoke_events) def add_event(self, event): """Updates the tree based on a revocation event. Creates any necessary internal nodes in the tree corresponding to the fields of the revocation event. The leaf node will always be set to the latest 'issued_before' for events that are otherwise identical. :param: Event to add to the tree :returns: the event that was passed in. 
""" revoke_map = self.revoke_map for key in attr_keys(event): revoke_map = revoke_map.setdefault(key, {}) revoke_map['issued_before'] = max( event.issued_before, revoke_map.get( 'issued_before', event.issued_before)) return event def remove_event(self, event): """Update the tree based on the removal of a Revocation Event Removes empty nodes from the tree from the leaf back to the root. If multiple events trace the same path, but have different 'issued_before' values, only the last is ever stored in the tree. So only an exact match on 'issued_before' ever triggers a removal :param: Event to remove from the tree """ stack = [] revoke_map = self.revoke_map for name in _EVENT_NAMES: key = event.key_for_name(name) nxt = revoke_map.get(key) if nxt is None: break stack.append((revoke_map, key, nxt)) revoke_map = nxt else: if event.issued_before == revoke_map['issued_before']: revoke_map.pop('issued_before') for parent, key, child in reversed(stack): if not any(child): del parent[key] def add_events(self, revoke_events): return list(map(self.add_event, revoke_events or [])) @staticmethod def _next_level_keys(name, token_data): """Generate keys based on current field name and token data Generate all keys to look for in the next iteration of revocation event tree traversal. """ yield '*' if name == 'role_id': # Roles are very special since a token has a list of them. # If the revocation event matches any one of them, # revoke the token. for role_id in token_data.get('roles', []): yield role_id else: # For other fields we try to get any branch that concur # with any alternative field in the token. for alt_name in ALTERNATIVES.get(name, [name]): yield token_data[alt_name] def _search(self, revoke_map, names, token_data): """Search for revocation event by token_data Traverse the revocation events tree looking for event matching token data issued after the token. 
""" if not names: # The last (leaf) level is checked in a special way because we # verify issued_at field differently. try: return revoke_map['issued_before'] >= token_data['issued_at'] except KeyError: return False name, remaining_names = names[0], names[1:] for key in self._next_level_keys(name, token_data): subtree = revoke_map.get('%s=%s' % (name, key)) if subtree and self._search(subtree, remaining_names, token_data): return True # If we made it out of the loop then no element in revocation tree # corresponds to our token and it is good. return False def is_revoked(self, token_data): """Check if a token matches the revocation event Compare the values for each level of the tree with the values from the token, accounting for attributes that have alternative keys, and for wildcard matches. if there is a match, continue down the tree. if there is no match, exit early. token_data is a map based on a flattened view of token. The required fields are: 'expires_at','user_id', 'project_id', 'identity_domain_id', 'assignment_domain_id', 'trust_id', 'trustor_id', 'trustee_id' 'consumer_id', 'access_token_id' """ return self._search(self.revoke_map, _EVENT_NAMES, token_data) def build_token_values_v2(access, default_domain_id): token_data = access['token'] token_expires_at = timeutils.parse_isotime(token_data['expires']) # Trim off the microseconds because the revocation event only has # expirations accurate to the second. 
token_expires_at = token_expires_at.replace(microsecond=0) token_values = { 'expires_at': timeutils.normalize_time(token_expires_at), 'issued_at': timeutils.normalize_time( timeutils.parse_isotime(token_data['issued_at'])), 'audit_id': token_data.get('audit_ids', [None])[0], 'audit_chain_id': token_data.get('audit_ids', [None])[-1], } token_values['user_id'] = access.get('user', {}).get('id') project = token_data.get('tenant') if project is not None: token_values['project_id'] = project['id'] else: token_values['project_id'] = None token_values['identity_domain_id'] = default_domain_id token_values['assignment_domain_id'] = default_domain_id trust = token_data.get('trust') if trust is None: token_values['trust_id'] = None token_values['trustor_id'] = None token_values['trustee_id'] = None else: token_values['trust_id'] = trust['id'] token_values['trustor_id'] = trust['trustor_id'] token_values['trustee_id'] = trust['trustee_id'] token_values['consumer_id'] = None token_values['access_token_id'] = None role_list = [] # Roles are by ID in metadata and by name in the user section roles = access.get('metadata', {}).get('roles', []) for role in roles: role_list.append(role) token_values['roles'] = role_list return token_values def build_token_values(token_data): token_expires_at = timeutils.parse_isotime(token_data['expires_at']) # Trim off the microseconds because the revocation event only has # expirations accurate to the second. token_expires_at = token_expires_at.replace(microsecond=0) token_values = { 'expires_at': timeutils.normalize_time(token_expires_at), 'issued_at': timeutils.normalize_time( timeutils.parse_isotime(token_data['issued_at'])), 'audit_id': token_data.get('audit_ids', [None])[0], 'audit_chain_id': token_data.get('audit_ids', [None])[-1], } user = token_data.get('user') if user is not None: token_values['user_id'] = user['id'] # Federated users do not have a domain, be defensive and get the user # domain set to None in the federated user case. 
token_values['identity_domain_id'] = user.get('domain', {}).get('id') else: token_values['user_id'] = None token_values['identity_domain_id'] = None project = token_data.get('project', token_data.get('tenant')) if project is not None: token_values['project_id'] = project['id'] # The domain_id of projects acting as domains is None token_values['assignment_domain_id'] = ( project['domain']['id'] if project['domain'] else None) else: token_values['project_id'] = None domain = token_data.get('domain') if domain is not None: token_values['assignment_domain_id'] = domain['id'] else: token_values['assignment_domain_id'] = None role_list = [] roles = token_data.get('roles') if roles is not None: for role in roles: role_list.append(role['id']) token_values['roles'] = role_list trust = token_data.get('OS-TRUST:trust') if trust is None: token_values['trust_id'] = None token_values['trustor_id'] = None token_values['trustee_id'] = None else: token_values['trust_id'] = trust['id'] token_values['trustor_id'] = trust['trustor_user']['id'] token_values['trustee_id'] = trust['trustee_user']['id'] oauth1 = token_data.get('OS-OAUTH1') if oauth1 is None: token_values['consumer_id'] = None token_values['access_token_id'] = None else: token_values['consumer_id'] = oauth1['consumer_id'] token_values['access_token_id'] = oauth1['access_token_id'] return token_values keystone-9.0.0/keystone/models/token_model.py0000664000567000056710000002454612701407102022525 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Unified in-memory token model.""" from keystoneclient.common import cms from oslo_config import cfg from oslo_utils import reflection from oslo_utils import timeutils import six from keystone import exception from keystone.federation import constants from keystone.i18n import _ CONF = cfg.CONF # supported token versions V2 = 'v2.0' V3 = 'v3.0' VERSIONS = frozenset([V2, V3]) def _parse_and_normalize_time(time_data): if isinstance(time_data, six.string_types): time_data = timeutils.parse_isotime(time_data) return timeutils.normalize_time(time_data) class KeystoneToken(dict): """An in-memory representation that unifies v2 and v3 tokens.""" # TODO(morganfainberg): Align this in-memory representation with the # objects in keystoneclient. This object should be eventually updated # to be the source of token data with the ability to emit any version # of the token instead of only consuming the token dict and providing # property accessors for the underlying data. 
def __init__(self, token_id, token_data): self.token_data = token_data if 'access' in token_data: super(KeystoneToken, self).__init__(**token_data['access']) self.version = V2 elif 'token' in token_data and 'methods' in token_data['token']: super(KeystoneToken, self).__init__(**token_data['token']) self.version = V3 else: raise exception.UnsupportedTokenVersionException() self.token_id = token_id self.short_id = cms.cms_hash_token(token_id, mode=CONF.token.hash_algorithm) if self.project_scoped and self.domain_scoped: raise exception.UnexpectedError(_('Found invalid token: scoped to ' 'both project and domain.')) def __repr__(self): desc = ('<%(type)s (audit_id=%(audit_id)s, ' 'audit_chain_id=%(audit_chain_id)s) at %(loc)s>') self_cls_name = reflection.get_class_name(self, fully_qualified=False) return desc % {'type': self_cls_name, 'audit_id': self.audit_id, 'audit_chain_id': self.audit_chain_id, 'loc': hex(id(self))} @property def expires(self): if self.version is V3: expires_at = self['expires_at'] else: expires_at = self['token']['expires'] return _parse_and_normalize_time(expires_at) @property def issued(self): if self.version is V3: issued_at = self['issued_at'] else: issued_at = self['token']['issued_at'] return _parse_and_normalize_time(issued_at) @property def audit_id(self): if self.version is V3: return self.get('audit_ids', [None])[0] return self['token'].get('audit_ids', [None])[0] @property def audit_chain_id(self): if self.version is V3: return self.get('audit_ids', [None])[-1] return self['token'].get('audit_ids', [None])[-1] @property def auth_token(self): return self.token_id @property def user_id(self): return self['user']['id'] @property def user_name(self): return self['user']['name'] @property def user_domain_name(self): try: if self.version == V3: return self['user']['domain']['name'] elif 'user' in self: return "Default" except KeyError: # nosec # Do not raise KeyError, raise UnexpectedError pass raise exception.UnexpectedError() @property 
def user_domain_id(self): try: if self.version == V3: return self['user']['domain']['id'] elif 'user' in self: return CONF.identity.default_domain_id except KeyError: # nosec # Do not raise KeyError, raise UnexpectedError pass raise exception.UnexpectedError() @property def domain_id(self): if self.version is V3: try: return self['domain']['id'] except KeyError: # Do not raise KeyError, raise UnexpectedError raise exception.UnexpectedError() # No domain scoped tokens in V2. raise NotImplementedError() @property def domain_name(self): if self.version is V3: try: return self['domain']['name'] except KeyError: # Do not raise KeyError, raise UnexpectedError raise exception.UnexpectedError() # No domain scoped tokens in V2. raise NotImplementedError() @property def project_id(self): try: if self.version is V3: return self['project']['id'] else: return self['token']['tenant']['id'] except KeyError: # Do not raise KeyError, raise UnexpectedError raise exception.UnexpectedError() @property def project_name(self): try: if self.version is V3: return self['project']['name'] else: return self['token']['tenant']['name'] except KeyError: # Do not raise KeyError, raise UnexpectedError raise exception.UnexpectedError() @property def project_domain_id(self): try: if self.version is V3: return self['project']['domain']['id'] elif 'tenant' in self['token']: return CONF.identity.default_domain_id except KeyError: # nosec # Do not raise KeyError, raise UnexpectedError pass raise exception.UnexpectedError() @property def project_domain_name(self): try: if self.version is V3: return self['project']['domain']['name'] if 'tenant' in self['token']: return 'Default' except KeyError: # nosec # Do not raise KeyError, raise UnexpectedError pass raise exception.UnexpectedError() @property def project_scoped(self): if self.version is V3: return 'project' in self else: return 'tenant' in self['token'] @property def domain_scoped(self): if self.version is V3: return 'domain' in self return False 
@property def scoped(self): return self.project_scoped or self.domain_scoped @property def trust_id(self): if self.version is V3: return self.get('OS-TRUST:trust', {}).get('id') else: return self.get('trust', {}).get('id') @property def trust_scoped(self): if self.version is V3: return 'OS-TRUST:trust' in self else: return 'trust' in self @property def trustee_user_id(self): if self.version is V3: return self.get( 'OS-TRUST:trust', {}).get('trustee_user_id') else: return self.get('trust', {}).get('trustee_user_id') @property def trustor_user_id(self): if self.version is V3: return self.get( 'OS-TRUST:trust', {}).get('trustor_user_id') else: return self.get('trust', {}).get('trustor_user_id') @property def trust_impersonation(self): if self.version is V3: return self.get('OS-TRUST:trust', {}).get('impersonation') else: return self.get('trust', {}).get('impersonation') @property def oauth_scoped(self): return 'OS-OAUTH1' in self @property def oauth_access_token_id(self): if self.version is V3 and self.oauth_scoped: return self['OS-OAUTH1']['access_token_id'] return None @property def oauth_consumer_id(self): if self.version is V3 and self.oauth_scoped: return self['OS-OAUTH1']['consumer_id'] return None @property def role_ids(self): if self.version is V3: return [r['id'] for r in self.get('roles', [])] else: return self.get('metadata', {}).get('roles', []) @property def role_names(self): if self.version is V3: return [r['name'] for r in self.get('roles', [])] else: return [r['name'] for r in self['user'].get('roles', [])] @property def bind(self): if self.version is V3: return self.get('bind') return self.get('token', {}).get('bind') @property def is_federated_user(self): try: return (self.version is V3 and constants.FEDERATION in self['user']) except KeyError: raise exception.UnexpectedError() @property def federation_group_ids(self): if self.is_federated_user: if self.version is V3: try: groups = self['user'][constants.FEDERATION].get( 'groups', []) return [g['id'] 
for g in groups] except KeyError: raise exception.UnexpectedError() return [] @property def federation_idp_id(self): if self.version is not V3 or not self.is_federated_user: return None return self['user'][constants.FEDERATION]['identity_provider']['id'] @property def federation_protocol_id(self): if self.version is V3 and self.is_federated_user: return self['user'][constants.FEDERATION]['protocol']['id'] return None @property def metadata(self): return self.get('metadata', {}) @property def methods(self): if self.version is V3: return self.get('methods', []) return [] keystone-9.0.0/keystone/cmd/0000775000567000056710000000000012701407246017131 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/cmd/cli.py0000664000567000056710000011542512701407102020251 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from __future__ import absolute_import from __future__ import print_function import os import sys import uuid from oslo_config import cfg from oslo_log import log from oslo_log import versionutils from oslo_serialization import jsonutils import pbr.version from keystone.common import config from keystone.common import driver_hints from keystone.common import openssl from keystone.common import sql from keystone.common.sql import migration_helpers from keystone.common import utils from keystone import exception from keystone.federation import idp from keystone.federation import utils as mapping_engine from keystone.i18n import _, _LW, _LI from keystone.server import backends from keystone import token CONF = cfg.CONF LOG = log.getLogger(__name__) class BaseApp(object): name = None @classmethod def add_argument_parser(cls, subparsers): parser = subparsers.add_parser(cls.name, help=cls.__doc__) parser.set_defaults(cmd_class=cls) return parser class BootStrap(BaseApp): """Perform the basic bootstrap process""" name = "bootstrap" def __init__(self): self.load_backends() self.project_id = uuid.uuid4().hex self.role_id = uuid.uuid4().hex self.service_id = None self.service_name = None self.username = None self.project_name = None self.role_name = None self.password = None self.public_url = None self.internal_url = None self.admin_url = None self.region_id = None self.endpoints = {} @classmethod def add_argument_parser(cls, subparsers): parser = super(BootStrap, cls).add_argument_parser(subparsers) parser.add_argument('--bootstrap-username', default='admin', metavar='OS_BOOTSTRAP_USERNAME', help=('The username of the initial keystone ' 'user during bootstrap process.')) # NOTE(morganfainberg): See below for ENV Variable that can be used # in lieu of the command-line arguments. 
parser.add_argument('--bootstrap-password', default=None, metavar='OS_BOOTSTRAP_PASSWORD', help='The bootstrap user password') parser.add_argument('--bootstrap-project-name', default='admin', metavar='OS_BOOTSTRAP_PROJECT_NAME', help=('The initial project created during the ' 'keystone bootstrap process.')) parser.add_argument('--bootstrap-role-name', default='admin', metavar='OS_BOOTSTRAP_ROLE_NAME', help=('The initial role-name created during the ' 'keystone bootstrap process.')) parser.add_argument('--bootstrap-service-name', default='keystone', metavar='OS_BOOTSTRAP_SERVICE_NAME', help=('The initial name for the initial identity ' 'service created during the keystone ' 'bootstrap process.')) parser.add_argument('--bootstrap-admin-url', metavar='OS_BOOTSTRAP_ADMIN_URL', help=('The initial identity admin url created ' 'during the keystone bootstrap process. ' 'e.g. http://127.0.0.1:35357/v2.0')) parser.add_argument('--bootstrap-public-url', metavar='OS_BOOTSTRAP_PUBLIC_URL', help=('The initial identity public url created ' 'during the keystone bootstrap process. ' 'e.g. http://127.0.0.1:5000/v2.0')) parser.add_argument('--bootstrap-internal-url', metavar='OS_BOOTSTRAP_INTERNAL_URL', help=('The initial identity internal url created ' 'during the keystone bootstrap process. ' 'e.g. 
http://127.0.0.1:5000/v2.0')) parser.add_argument('--bootstrap-region-id', metavar='OS_BOOTSTRAP_REGION_ID', help=('The initial region_id endpoints will be ' 'placed in during the keystone bootstrap ' 'process.')) return parser def load_backends(self): drivers = backends.load_backends() self.resource_manager = drivers['resource_api'] self.identity_manager = drivers['identity_api'] self.assignment_manager = drivers['assignment_api'] self.catalog_manager = drivers['catalog_api'] self.role_manager = drivers['role_api'] def _get_config(self): self.username = ( os.environ.get('OS_BOOTSTRAP_USERNAME') or CONF.command.bootstrap_username) self.project_name = ( os.environ.get('OS_BOOTSTRAP_PROJECT_NAME') or CONF.command.bootstrap_project_name) self.role_name = ( os.environ.get('OS_BOOTSTRAP_ROLE_NAME') or CONF.command.bootstrap_role_name) self.password = ( os.environ.get('OS_BOOTSTRAP_PASSWORD') or CONF.command.bootstrap_password) self.service_name = ( os.environ.get('OS_BOOTSTRAP_SERVICE_NAME') or CONF.command.bootstrap_service_name) self.admin_url = ( os.environ.get('OS_BOOTSTRAP_ADMIN_URL') or CONF.command.bootstrap_admin_url) self.public_url = ( os.environ.get('OS_BOOTSTRAP_PUBLIC_URL') or CONF.command.bootstrap_public_url) self.internal_url = ( os.environ.get('OS_BOOTSTRAP_INTERNAL_URL') or CONF.command.bootstrap_internal_url) self.region_id = ( os.environ.get('OS_BOOTSTRAP_REGION_ID') or CONF.command.bootstrap_region_id) def do_bootstrap(self): """Perform the bootstrap actions. Create bootstrap user, project, and role so that CMS, humans, or scripts can continue to perform initial setup (domains, projects, services, endpoints, etc) of Keystone when standing up a new deployment. 
""" self._get_config() if self.password is None: print(_('Either --bootstrap-password argument or ' 'OS_BOOTSTRAP_PASSWORD must be set.')) raise ValueError # NOTE(morganfainberg): Ensure the default domain is in-fact created default_domain = { 'id': CONF.identity.default_domain_id, 'name': 'Default', 'enabled': True, 'description': 'The default domain' } try: self.resource_manager.create_domain( domain_id=default_domain['id'], domain=default_domain) LOG.info(_LI('Created domain %s'), default_domain['id']) except exception.Conflict: # NOTE(morganfainberg): Domain already exists, continue on. LOG.info(_LI('Domain %s already exists, skipping creation.'), default_domain['id']) try: self.resource_manager.create_project( project_id=self.project_id, project={'enabled': True, 'id': self.project_id, 'domain_id': default_domain['id'], 'description': 'Bootstrap project for initializing ' 'the cloud.', 'name': self.project_name} ) LOG.info(_LI('Created project %s'), self.project_name) except exception.Conflict: LOG.info(_LI('Project %s already exists, skipping creation.'), self.project_name) project = self.resource_manager.get_project_by_name( self.project_name, default_domain['id']) self.project_id = project['id'] # NOTE(morganfainberg): Do not create the user if it already exists. try: user = self.identity_manager.get_user_by_name(self.username, default_domain['id']) LOG.info(_LI('User %s already exists, skipping creation.'), self.username) except exception.UserNotFound: user = self.identity_manager.create_user( user_ref={'name': self.username, 'enabled': True, 'domain_id': default_domain['id'], 'password': self.password } ) LOG.info(_LI('Created user %s'), self.username) # NOTE(morganfainberg): Do not create the role if it already exists. 
try: self.role_manager.create_role( role_id=self.role_id, role={'name': self.role_name, 'id': self.role_id}, ) LOG.info(_LI('Created Role %s'), self.role_name) except exception.Conflict: LOG.info(_LI('Role %s exists, skipping creation.'), self.role_name) # NOTE(davechen): There is no backend method to get the role # by name, so build the hints to list the roles and filter by # name instead. hints = driver_hints.Hints() hints.add_filter('name', self.role_name) role = self.role_manager.list_roles(hints) self.role_id = role[0]['id'] # NOTE(morganfainberg): Handle the case that the role assignment has # already occurred. try: self.assignment_manager.add_role_to_user_and_project( user_id=user['id'], tenant_id=self.project_id, role_id=self.role_id ) LOG.info(_LI('Granted %(role)s on %(project)s to user' ' %(username)s.'), {'role': self.role_name, 'project': self.project_name, 'username': self.username}) except exception.Conflict: LOG.info(_LI('User %(username)s already has %(role)s on ' '%(project)s.'), {'username': self.username, 'role': self.role_name, 'project': self.project_name}) if self.region_id: try: self.catalog_manager.create_region( region_ref={'id': self.region_id} ) LOG.info(_LI('Created Region %s'), self.region_id) except exception.Conflict: LOG.info(_LI('Region %s exists, skipping creation.'), self.region_id) if self.public_url or self.admin_url or self.internal_url: hints = driver_hints.Hints() hints.add_filter('type', 'identity') services = self.catalog_manager.list_services(hints) if services: service_ref = services[0] hints = driver_hints.Hints() hints.add_filter('service_id', service_ref['id']) if self.region_id: hints.add_filter('region_id', self.region_id) endpoints = self.catalog_manager.list_endpoints(hints) else: service_ref = {'id': uuid.uuid4().hex, 'name': self.service_name, 'type': 'identity', 'enabled': True} self.catalog_manager.create_service( service_id=service_ref['id'], service_ref=service_ref) endpoints = [] self.service_id = 
service_ref['id'] available_interfaces = {e['interface']: e for e in endpoints} expected_endpoints = {'public': self.public_url, 'internal': self.internal_url, 'admin': self.admin_url} for interface, url in expected_endpoints.items(): if not url: # not specified to bootstrap command continue try: endpoint_ref = available_interfaces[interface] except KeyError: endpoint_ref = {'id': uuid.uuid4().hex, 'interface': interface, 'url': url, 'service_id': self.service_id, 'enabled': True} if self.region_id: endpoint_ref['region_id'] = self.region_id self.catalog_manager.create_endpoint( endpoint_id=endpoint_ref['id'], endpoint_ref=endpoint_ref) LOG.info(_LI('Created %(interface)s endpoint %(url)s'), {'interface': interface, 'url': url}) else: # NOTE(jamielennox): electing not to update existing # endpoints here. There may be call to do so in future. LOG.info(_LI('Skipping %s endpoint as already created'), interface) self.endpoints[interface] = endpoint_ref['id'] @classmethod def main(cls): klass = cls() klass.do_bootstrap() class DbSync(BaseApp): """Sync the database.""" name = 'db_sync' @classmethod def add_argument_parser(cls, subparsers): parser = super(DbSync, cls).add_argument_parser(subparsers) parser.add_argument('version', default=None, nargs='?', help=('Migrate the database up to a specified ' 'version. If not provided, db_sync will ' 'migrate the database to the latest known ' 'version. Schema downgrades are not ' 'supported.')) parser.add_argument('--extension', default=None, help=('Migrate the database for the specified ' 'extension. 
If not provided, db_sync will ' 'migrate the common repository.')) return parser @staticmethod def main(): version = CONF.command.version extension = CONF.command.extension migration_helpers.sync_database_to_version(extension, version) class DbVersion(BaseApp): """Print the current migration version of the database.""" name = 'db_version' @classmethod def add_argument_parser(cls, subparsers): parser = super(DbVersion, cls).add_argument_parser(subparsers) parser.add_argument('--extension', default=None, help=('Print the migration version of the ' 'database for the specified extension. If ' 'not provided, print it for the common ' 'repository.')) @staticmethod def main(): extension = CONF.command.extension migration_helpers.print_db_version(extension) class BasePermissionsSetup(BaseApp): """Common user/group setup for file permissions.""" @classmethod def add_argument_parser(cls, subparsers): parser = super(BasePermissionsSetup, cls).add_argument_parser(subparsers) running_as_root = (os.geteuid() == 0) parser.add_argument('--keystone-user', required=running_as_root) parser.add_argument('--keystone-group', required=running_as_root) return parser @staticmethod def get_user_group(): keystone_user_id = None keystone_group_id = None try: a = CONF.command.keystone_user if a: keystone_user_id = utils.get_unix_user(a)[0] except KeyError: raise ValueError("Unknown user '%s' in --keystone-user" % a) try: a = CONF.command.keystone_group if a: keystone_group_id = utils.get_unix_group(a)[0] except KeyError: raise ValueError("Unknown group '%s' in --keystone-group" % a) return keystone_user_id, keystone_group_id class BaseCertificateSetup(BasePermissionsSetup): """Provides common options for certificate setup.""" @classmethod def add_argument_parser(cls, subparsers): parser = super(BaseCertificateSetup, cls).add_argument_parser(subparsers) parser.add_argument('--rebuild', default=False, action='store_true', help=('Rebuild certificate files: erase previous ' 'files and regenerate 
them.')) return parser class PKISetup(BaseCertificateSetup): """Set up Key pairs and certificates for token signing and verification. This is NOT intended for production use, see Keystone Configuration documentation for details. As of the Mitaka release, this command has been DEPRECATED and may be removed in the 'O' release. """ name = 'pki_setup' @classmethod def main(cls): versionutils.report_deprecated_feature( LOG, _LW("keystone-manage pki_setup is deprecated as of Mitaka in " "favor of not using PKI tokens and may be removed in 'O' " "release.")) LOG.warning(_LW('keystone-manage pki_setup is not recommended for ' 'production use.')) keystone_user_id, keystone_group_id = cls.get_user_group() conf_pki = openssl.ConfigurePKI(keystone_user_id, keystone_group_id, rebuild=CONF.command.rebuild) conf_pki.run() class SSLSetup(BaseCertificateSetup): """Create key pairs and certificates for HTTPS connections. This is NOT intended for production use, see Keystone Configuration documentation for details. """ name = 'ssl_setup' @classmethod def main(cls): LOG.warning(_LW('keystone-manage ssl_setup is not recommended for ' 'production use.')) keystone_user_id, keystone_group_id = cls.get_user_group() conf_ssl = openssl.ConfigureSSL(keystone_user_id, keystone_group_id, rebuild=CONF.command.rebuild) conf_ssl.run() class FernetSetup(BasePermissionsSetup): """Setup a key repository for Fernet tokens. This also creates a primary key used for both creating and validating Fernet tokens. To improve security, you should rotate your keys (using keystone-manage fernet_rotate, for example). 
""" name = 'fernet_setup' @classmethod def main(cls): from keystone.token.providers.fernet import utils as fernet keystone_user_id, keystone_group_id = cls.get_user_group() fernet.create_key_directory(keystone_user_id, keystone_group_id) if fernet.validate_key_repository(requires_write=True): fernet.initialize_key_repository( keystone_user_id, keystone_group_id) class FernetRotate(BasePermissionsSetup): """Rotate Fernet encryption keys. This assumes you have already run keystone-manage fernet_setup. A new primary key is placed into rotation, which is used for new tokens. The old primary key is demoted to secondary, which can then still be used for validating tokens. Excess secondary keys (beyond [fernet_tokens] max_active_keys) are revoked. Revoked keys are permanently deleted. A new staged key will be created and used to validate tokens. The next time key rotation takes place, the staged key will be put into rotation as the primary key. Rotating keys too frequently, or with [fernet_tokens] max_active_keys set too low, will cause tokens to become invalid prior to their expiration. 
""" name = 'fernet_rotate' @classmethod def main(cls): from keystone.token.providers.fernet import utils as fernet keystone_user_id, keystone_group_id = cls.get_user_group() if fernet.validate_key_repository(requires_write=True): fernet.rotate_keys(keystone_user_id, keystone_group_id) class TokenFlush(BaseApp): """Flush expired tokens from the backend.""" name = 'token_flush' @classmethod def main(cls): token_manager = token.persistence.PersistenceManager() token_manager.flush_expired_tokens() class MappingPurge(BaseApp): """Purge the mapping table.""" name = 'mapping_purge' @classmethod def add_argument_parser(cls, subparsers): parser = super(MappingPurge, cls).add_argument_parser(subparsers) parser.add_argument('--all', default=False, action='store_true', help=('Purge all mappings.')) parser.add_argument('--domain-name', default=None, help=('Purge any mappings for the domain ' 'specified.')) parser.add_argument('--public-id', default=None, help=('Purge the mapping for the Public ID ' 'specified.')) parser.add_argument('--local-id', default=None, help=('Purge the mappings for the Local ID ' 'specified.')) parser.add_argument('--type', default=None, choices=['user', 'group'], help=('Purge any mappings for the type ' 'specified.')) return parser @staticmethod def main(): def validate_options(): # NOTE(henry-nash): It would be nice to use the argparse automated # checking for this validation, but the only way I can see doing # that is to make the default (i.e. if no optional parameters # are specified) to purge all mappings - and that sounds too # dangerous as a default. So we use it in a slightly # unconventional way, where all parameters are optional, but you # must specify at least one. 
if (CONF.command.all is False and CONF.command.domain_name is None and CONF.command.public_id is None and CONF.command.local_id is None and CONF.command.type is None): raise ValueError(_('At least one option must be provided')) if (CONF.command.all is True and (CONF.command.domain_name is not None or CONF.command.public_id is not None or CONF.command.local_id is not None or CONF.command.type is not None)): raise ValueError(_('--all option cannot be mixed with ' 'other options')) def get_domain_id(name): try: return resource_manager.get_domain_by_name(name)['id'] except KeyError: raise ValueError(_("Unknown domain '%(name)s' specified by " "--domain-name") % {'name': name}) validate_options() drivers = backends.load_backends() resource_manager = drivers['resource_api'] mapping_manager = drivers['id_mapping_api'] # Now that we have validated the options, we know that at least one # option has been specified, and if it was the --all option then this # was the only option specified. # # The mapping dict is used to filter which mappings are purged, so # leaving it empty means purge them all mapping = {} if CONF.command.domain_name is not None: mapping['domain_id'] = get_domain_id(CONF.command.domain_name) if CONF.command.public_id is not None: mapping['public_id'] = CONF.command.public_id if CONF.command.local_id is not None: mapping['local_id'] = CONF.command.local_id if CONF.command.type is not None: mapping['type'] = CONF.command.type mapping_manager.purge_mappings(mapping) DOMAIN_CONF_FHEAD = 'keystone.' DOMAIN_CONF_FTAIL = '.conf' def _domain_config_finder(conf_dir): """Return a generator of all domain config files found in a directory. Donmain configs match the filename pattern of 'keystone..conf'. 
:returns: generator yeilding (filename, domain_name) tuples """ LOG.info(_LI('Scanning %r for domain config files'), conf_dir) for r, d, f in os.walk(conf_dir): for fname in f: if (fname.startswith(DOMAIN_CONF_FHEAD) and fname.endswith(DOMAIN_CONF_FTAIL)): if fname.count('.') >= 2: domain_name = fname[len(DOMAIN_CONF_FHEAD): -len(DOMAIN_CONF_FTAIL)] yield (os.path.join(r, fname), domain_name) continue LOG.warning(_LW('Ignoring file (%s) while scanning ' 'domain config directory'), fname) class DomainConfigUploadFiles(object): def __init__(self, domain_config_finder=_domain_config_finder): super(DomainConfigUploadFiles, self).__init__() self.load_backends() self._domain_config_finder = domain_config_finder def load_backends(self): drivers = backends.load_backends() self.resource_manager = drivers['resource_api'] self.domain_config_manager = drivers['domain_config_api'] def valid_options(self): """Validate the options, returning True if they are indeed valid. It would be nice to use the argparse automated checking for this validation, but the only way I can see doing that is to make the default (i.e. if no optional parameters are specified) to upload all configuration files - and that sounds too dangerous as a default. So we use it in a slightly unconventional way, where all parameters are optional, but you must specify at least one. """ if (CONF.command.all is False and CONF.command.domain_name is None): print(_('At least one option must be provided, use either ' '--all or --domain-name')) raise ValueError if (CONF.command.all is True and CONF.command.domain_name is not None): print(_('The --all option cannot be used with ' 'the --domain-name option')) raise ValueError def upload_config_to_database(self, file_name, domain_name): """Upload a single config file to the database. :param file_name: the file containing the config options :param domain_name: the domain name :raises ValueError: the domain does not exist or already has domain specific configurations defined. 
:raises Exceptions from oslo config: there is an issue with options defined in the config file or its format. The caller of this method should catch the errors raised and handle appropriately in order that the best UX experience can be provided for both the case of when a user has asked for a specific config file to be uploaded, as well as all config files in a directory. """ try: domain_ref = ( self.resource_manager.get_domain_by_name(domain_name)) except exception.DomainNotFound: print(_('Invalid domain name: %(domain)s found in config file ' 'name: %(file)s - ignoring this file.') % { 'domain': domain_name, 'file': file_name}) raise ValueError if self.domain_config_manager.get_config_with_sensitive_info( domain_ref['id']): print(_('Domain: %(domain)s already has a configuration ' 'defined - ignoring file: %(file)s.') % { 'domain': domain_name, 'file': file_name}) raise ValueError sections = {} try: parser = cfg.ConfigParser(file_name, sections) parser.parse() except Exception: # We explicitly don't try and differentiate the error cases, in # order to keep the code in this tool more robust as oslo.config # changes. print(_('Error parsing configuration file for domain: %(domain)s, ' 'file: %(file)s.') % { 'domain': domain_name, 'file': file_name}) raise for group in sections: for option in sections[group]: sections[group][option] = sections[group][option][0] self.domain_config_manager.create_config(domain_ref['id'], sections) def upload_configs_to_database(self, file_name, domain_name): """Upload configs from file and load into database. This method will be called repeatedly for all the config files in the config directory. To provide a better UX, we differentiate the error handling in this case (versus when the user has asked for a single config file to be uploaded). 
""" try: self.upload_config_to_database(file_name, domain_name) except ValueError: # nosec # We've already given all the info we can in a message, so carry # on to the next one pass except Exception: # Some other error occurred relating to this specific config file # or domain. Since we are trying to upload all the config files, # we'll continue and hide this exception. However, we tell the # user how to get more info about this error by re-running with # just the domain at fault. When we run in single-domain mode we # will NOT hide the exception. print(_('To get a more detailed information on this error, re-run ' 'this command for the specific domain, i.e.: ' 'keystone-manage domain_config_upload --domain-name %s') % domain_name) pass def read_domain_configs_from_files(self): """Read configs from file(s) and load into database. The command line parameters have already been parsed and the CONF command option will have been set. It is either set to the name of an explicit domain, or it's None to indicate that we want all domain config files. 
""" domain_name = CONF.command.domain_name conf_dir = CONF.identity.domain_config_dir if not os.path.exists(conf_dir): print(_('Unable to locate domain config directory: %s') % conf_dir) raise ValueError if domain_name: # Request is to upload the configs for just one domain fname = DOMAIN_CONF_FHEAD + domain_name + DOMAIN_CONF_FTAIL self.upload_config_to_database( os.path.join(conf_dir, fname), domain_name) return for filename, domain_name in self._domain_config_finder(conf_dir): self.upload_configs_to_database(filename, domain_name) def run(self): # First off, let's just check we can talk to the domain database try: self.resource_manager.list_domains(driver_hints.Hints()) except Exception: # It is likely that there is some SQL or other backend error # related to set up print(_('Unable to access the keystone database, please check it ' 'is configured correctly.')) raise try: self.valid_options() self.read_domain_configs_from_files() except ValueError: # We will already have printed out a nice message, so indicate # to caller the non-success error code to be used. return 1 class DomainConfigUpload(BaseApp): """Upload the domain specific configuration files to the database.""" name = 'domain_config_upload' @classmethod def add_argument_parser(cls, subparsers): parser = super(DomainConfigUpload, cls).add_argument_parser(subparsers) parser.add_argument('--all', default=False, action='store_true', help='Upload contents of all domain specific ' 'configuration files. Either use this option ' 'or use the --domain-name option to choose a ' 'specific domain.') parser.add_argument('--domain-name', default=None, help='Upload contents of the specific ' 'configuration file for the given domain. 
' 'Either use this option or use the --all ' 'option to upload contents for all domains.') return parser @staticmethod def main(): dcu = DomainConfigUploadFiles() status = dcu.run() if status is not None: sys.exit(status) class SamlIdentityProviderMetadata(BaseApp): """Generate Identity Provider metadata.""" name = 'saml_idp_metadata' @staticmethod def main(): metadata = idp.MetadataGenerator().generate_metadata() print(metadata.to_string()) class MappingEngineTester(BaseApp): """Execute mapping engine locally.""" name = 'mapping_engine' @staticmethod def read_rules(path): try: with open(path) as file: return jsonutils.load(file) except ValueError as e: raise SystemExit(_('Error while parsing rules ' '%(path)s: %(err)s') % {'path': path, 'err': e}) @staticmethod def read_file(path): try: with open(path) as file: return file.read().strip() except IOError as e: raise SystemExit(_("Error while opening file " "%(path)s: %(err)s") % {'path': path, 'err': e}) @staticmethod def normalize_assertion(assertion): def split(line): try: k, v = line.split(':', 1) return k.strip(), v.strip() except ValueError as e: msg = _("Error while parsing line: '%(line)s': %(err)s") raise SystemExit(msg % {'line': line, 'err': e}) assertion = assertion.split('\n') assertion_dict = {} prefix = CONF.command.prefix for line in assertion: k, v = split(line) if prefix: if k.startswith(prefix): assertion_dict[k] = v else: assertion_dict[k] = v return assertion_dict @staticmethod def normalize_rules(rules): if isinstance(rules, list): return {'rules': rules} else: return rules @classmethod def main(cls): if not CONF.command.engine_debug: mapping_engine.LOG.logger.setLevel('WARN') rules = MappingEngineTester.read_rules(CONF.command.rules) rules = MappingEngineTester.normalize_rules(rules) mapping_engine.validate_mapping_structure(rules) assertion = MappingEngineTester.read_file(CONF.command.input) assertion = MappingEngineTester.normalize_assertion(assertion) rp = 
mapping_engine.RuleProcessor(rules['rules']) print(jsonutils.dumps(rp.process(assertion), indent=2)) @classmethod def add_argument_parser(cls, subparsers): parser = super(MappingEngineTester, cls).add_argument_parser(subparsers) parser.add_argument('--rules', default=None, required=True, help=("Path to the file with " "rules to be executed. " "Content must be a proper JSON structure, " "with a top-level key 'rules' and " "corresponding value being a list.")) parser.add_argument('--input', default=None, required=True, help=("Path to the file with input attributes. " "The content consists of ':' separated " "parameter names and their values. " "There is only one key-value pair per line. " "A ';' in the value is a separator and then " "a value is treated as a list. Example:\n " "EMAIL: me@example.com\n" "LOGIN: me\n" "GROUPS: group1;group2;group3")) parser.add_argument('--prefix', default=None, help=("A prefix used for each environment " "variable in the assertion. For example, " "all environment variables may have the " "prefix ASDF_.")) parser.add_argument('--engine-debug', default=False, action="store_true", help=("Enable debug messages from the mapping " "engine.")) CMDS = [ BootStrap, DbSync, DbVersion, DomainConfigUpload, FernetRotate, FernetSetup, MappingPurge, MappingEngineTester, PKISetup, SamlIdentityProviderMetadata, SSLSetup, TokenFlush, ] def add_command_parsers(subparsers): for cmd in CMDS: cmd.add_argument_parser(subparsers) command_opt = cfg.SubCommandOpt('command', title='Commands', help='Available commands', handler=add_command_parsers) def main(argv=None, config_files=None): CONF.register_cli_opt(command_opt) config.configure() sql.initialize() config.set_default_for_default_log_levels() CONF(args=argv[1:], project='keystone', version=pbr.version.VersionInfo('keystone').version_string(), usage='%(prog)s [' + '|'.join([cmd.name for cmd in CMDS]) + ']', default_config_files=config_files) config.setup_logging() CONF.command.cmd_class.main() 
keystone-9.0.0/keystone/cmd/manage.py0000664000567000056710000000311612701407102020723 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys # If ../../keystone/__init__.py exists, add ../../ to Python search path, so # that it will override what happens to be installed in # /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'keystone', '__init__.py')): sys.path.insert(0, possible_topdir) from keystone.cmd import cli from keystone.common import environment # entry point. def main(): environment.use_stdlib() dev_conf = os.path.join(possible_topdir, 'etc', 'keystone.conf') config_files = None if os.path.exists(dev_conf): config_files = [dev_conf] cli.main(argv=sys.argv, config_files=config_files) keystone-9.0.0/keystone/cmd/all.py0000664000567000056710000000247612701407102020253 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys # If ../../keystone/__init__.py exists, add ../../ to Python search path, so # that it will override what happens to be installed in # /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'keystone', '__init__.py')): sys.path.insert(0, possible_topdir) from keystone.server import eventlet as eventlet_server # entry point. def main(): eventlet_server.run(possible_topdir) keystone-9.0.0/keystone/cmd/__init__.py0000664000567000056710000000000012701407102021217 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/token/0000775000567000056710000000000012701407246017506 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/token/utils.py0000664000567000056710000000204712701407102021212 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneclient.common import cms from oslo_config import cfg def generate_unique_id(token_id): """Return a unique ID for a token. 
The returned value is useful as the primary key of a database table, memcache store, or other lookup table. :returns: Given a PKI token, returns it's hashed value. Otherwise, returns the passed-in value (such as a UUID token ID or an existing hash). """ return cms.cms_hash_token(token_id, mode=cfg.CONF.token.hash_algorithm) keystone-9.0.0/keystone/token/__init__.py0000664000567000056710000000132412701407102021606 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.token import controllers # noqa from keystone.token import persistence # noqa from keystone.token import provider # noqa keystone-9.0.0/keystone/token/_simple_cert.py0000664000567000056710000000630112701407102022514 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(morganfainberg): Remove this file and extension in the "O" release as # it is only used in support of the PKI/PKIz token providers. 
import functools from oslo_config import cfg import webob from keystone.common import controller from keystone.common import dependency from keystone.common import extension from keystone.common import json_home from keystone.common import wsgi from keystone import exception CONF = cfg.CONF EXTENSION_DATA = { 'name': 'OpenStack Simple Certificate API', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-SIMPLE-CERT/v1.0', 'alias': 'OS-SIMPLE-CERT', 'updated': '2014-01-20T12:00:0-00:00', 'description': 'OpenStack simple certificate retrieval extension', 'links': [ { 'rel': 'describedby', 'type': 'text/html', 'href': 'http://developer.openstack.org/' 'api-ref-identity-v2-ext.html', } ]} extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA) extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA) build_resource_relation = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-SIMPLE-CERT', extension_version='1.0') class Routers(wsgi.RoutersBase): def _construct_url(self, suffix): return "/OS-SIMPLE-CERT/%s" % suffix def append_v3_routers(self, mapper, routers): controller = SimpleCert() self._add_resource( mapper, controller, path=self._construct_url('ca'), get_action='get_ca_certificate', rel=build_resource_relation(resource_name='ca_certificate')) self._add_resource( mapper, controller, path=self._construct_url('certificates'), get_action='list_certificates', rel=build_resource_relation(resource_name='certificates')) @dependency.requires('token_provider_api') class SimpleCert(controller.V3Controller): def _get_certificate(self, name): try: with open(name, 'r') as f: body = f.read() except IOError: raise exception.CertificateFilesUnavailable() # NOTE(jamielennox): We construct the webob Response ourselves here so # that we don't pass through the JSON encoding process. 
headers = [('Content-Type', 'application/x-pem-file')] return webob.Response(body=body, headerlist=headers, status="200 OK") def get_ca_certificate(self, context): return self._get_certificate(CONF.signing.ca_certs) def list_certificates(self, context): return self._get_certificate(CONF.signing.certfile) keystone-9.0.0/keystone/token/persistence/0000775000567000056710000000000012701407246022032 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/token/persistence/backends/0000775000567000056710000000000012701407246023604 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/token/persistence/backends/memcache_pool.py0000664000567000056710000000235612701407102026746 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import versionutils from keystone.token.persistence.backends import memcache CONF = cfg.CONF class Token(memcache.Token): memcached_backend = 'pooled_memcached' @versionutils.deprecated( what='Memcache Pool Token Persistence Driver', as_of=versionutils.deprecated.MITAKA, in_favor_of='fernet token driver (no-persistence)', remove_in=0) def __init__(self, *args, **kwargs): for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize', 'pool_unused_timeout', 'pool_connection_get_timeout'): kwargs[arg] = getattr(CONF.memcache, arg) super(Token, self).__init__(*args, **kwargs) keystone-9.0.0/keystone/token/persistence/backends/__init__.py0000664000567000056710000000000012701407102025672 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/token/persistence/backends/sql.py0000664000567000056710000002656512701407102024762 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import functools from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from keystone.common import sql from keystone import exception from keystone.i18n import _LI from keystone import token from keystone.token import provider CONF = cfg.CONF LOG = log.getLogger(__name__) class TokenModel(sql.ModelBase, sql.DictBase): __tablename__ = 'token' attributes = ['id', 'expires', 'user_id', 'trust_id'] id = sql.Column(sql.String(64), primary_key=True) expires = sql.Column(sql.DateTime(), default=None) extra = sql.Column(sql.JsonBlob()) valid = sql.Column(sql.Boolean(), default=True, nullable=False) user_id = sql.Column(sql.String(64)) trust_id = sql.Column(sql.String(64)) __table_args__ = ( sql.Index('ix_token_expires', 'expires'), sql.Index('ix_token_expires_valid', 'expires', 'valid'), sql.Index('ix_token_user_id', 'user_id'), sql.Index('ix_token_trust_id', 'trust_id') ) def _expiry_range_batched(session, upper_bound_func, batch_size): """Returns the stop point of the next batch for expiration. Return the timestamp of the next token that is `batch_size` rows from being the oldest expired token. """ # This expiry strategy splits the tokens into roughly equal sized batches # to be deleted. It does this by finding the timestamp of a token # `batch_size` rows from the oldest token and yielding that to the caller. # It's expected that the caller will then delete all rows with a timestamp # equal to or older than the one yielded. This may delete slightly more # tokens than the batch_size, but that should be ok in almost all cases. 
LOG.debug('Token expiration batch size: %d', batch_size) query = session.query(TokenModel.expires) query = query.filter(TokenModel.expires < upper_bound_func()) query = query.order_by(TokenModel.expires) query = query.offset(batch_size - 1) query = query.limit(1) while True: try: next_expiration = query.one()[0] except sql.NotFound: # There are less than `batch_size` rows remaining, so fall # through to the normal delete break yield next_expiration yield upper_bound_func() def _expiry_range_all(session, upper_bound_func): """Expires all tokens in one pass.""" yield upper_bound_func() class Token(token.persistence.TokenDriverV8): # Public interface def get_token(self, token_id): if token_id is None: raise exception.TokenNotFound(token_id=token_id) with sql.session_for_read() as session: token_ref = session.query(TokenModel).get(token_id) if not token_ref or not token_ref.valid: raise exception.TokenNotFound(token_id=token_id) return token_ref.to_dict() def create_token(self, token_id, data): data_copy = copy.deepcopy(data) if not data_copy.get('expires'): data_copy['expires'] = provider.default_expire_time() if not data_copy.get('user_id'): data_copy['user_id'] = data_copy['user']['id'] token_ref = TokenModel.from_dict(data_copy) token_ref.valid = True with sql.session_for_write() as session: session.add(token_ref) return token_ref.to_dict() def delete_token(self, token_id): with sql.session_for_write() as session: token_ref = session.query(TokenModel).get(token_id) if not token_ref or not token_ref.valid: raise exception.TokenNotFound(token_id=token_id) token_ref.valid = False def delete_tokens(self, user_id, tenant_id=None, trust_id=None, consumer_id=None): """Deletes all tokens in one session The user_id will be ignored if the trust_id is specified. user_id will always be specified. If using a trust, the token's user_id is set to the trustee's user ID or the trustor's user ID, so will use trust_id to query the tokens. 
""" token_list = [] with sql.session_for_write() as session: now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter_by(valid=True) query = query.filter(TokenModel.expires > now) if trust_id: query = query.filter(TokenModel.trust_id == trust_id) else: query = query.filter(TokenModel.user_id == user_id) for token_ref in query.all(): if tenant_id: token_ref_dict = token_ref.to_dict() if not self._tenant_matches(tenant_id, token_ref_dict): continue if consumer_id: token_ref_dict = token_ref.to_dict() if not self._consumer_matches(consumer_id, token_ref_dict): continue token_ref.valid = False token_list.append(token_ref.id) return token_list def _tenant_matches(self, tenant_id, token_ref_dict): return ((tenant_id is None) or (token_ref_dict.get('tenant') and token_ref_dict['tenant'].get('id') == tenant_id)) def _consumer_matches(self, consumer_id, ref): if consumer_id is None: return True else: try: oauth = ref['token_data']['token'].get('OS-OAUTH1', {}) return oauth and oauth['consumer_id'] == consumer_id except KeyError: return False def _list_tokens_for_trust(self, trust_id): with sql.session_for_read() as session: tokens = [] now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter(TokenModel.expires > now) query = query.filter(TokenModel.trust_id == trust_id) token_references = query.filter_by(valid=True) for token_ref in token_references: token_ref_dict = token_ref.to_dict() tokens.append(token_ref_dict['id']) return tokens def _list_tokens_for_user(self, user_id, tenant_id=None): with sql.session_for_read() as session: tokens = [] now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter(TokenModel.expires > now) query = query.filter(TokenModel.user_id == user_id) token_references = query.filter_by(valid=True) for token_ref in token_references: token_ref_dict = token_ref.to_dict() if self._tenant_matches(tenant_id, token_ref_dict): tokens.append(token_ref['id']) return tokens def 
_list_tokens_for_consumer(self, user_id, consumer_id): tokens = [] with sql.session_for_write() as session: now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter(TokenModel.expires > now) query = query.filter(TokenModel.user_id == user_id) token_references = query.filter_by(valid=True) for token_ref in token_references: token_ref_dict = token_ref.to_dict() if self._consumer_matches(consumer_id, token_ref_dict): tokens.append(token_ref_dict['id']) return tokens def _list_tokens(self, user_id, tenant_id=None, trust_id=None, consumer_id=None): if not CONF.token.revoke_by_id: return [] if trust_id: return self._list_tokens_for_trust(trust_id) if consumer_id: return self._list_tokens_for_consumer(user_id, consumer_id) else: return self._list_tokens_for_user(user_id, tenant_id) def list_revoked_tokens(self): with sql.session_for_read() as session: tokens = [] now = timeutils.utcnow() query = session.query(TokenModel.id, TokenModel.expires, TokenModel.extra) query = query.filter(TokenModel.expires > now) token_references = query.filter_by(valid=False) for token_ref in token_references: token_data = token_ref[2]['token_data'] if 'access' in token_data: # It's a v2 token. audit_ids = token_data['access']['token']['audit_ids'] else: # It's a v3 token. audit_ids = token_data['token']['audit_ids'] record = { 'id': token_ref[0], 'expires': token_ref[1], 'audit_id': audit_ids[0], } tokens.append(record) return tokens def _expiry_range_strategy(self, dialect): """Choose a token range expiration strategy Based on the DB dialect, select an expiry range callable that is appropriate. """ # DB2 and MySQL can both benefit from a batched strategy. On DB2 the # transaction log can fill up and on MySQL w/Galera, large # transactions can exceed the maximum write set size. 
if dialect == 'ibm_db_sa': # Limit of 100 is known to not fill a transaction log # of default maximum size while not significantly # impacting the performance of large token purges on # systems where the maximum transaction log size has # been increased beyond the default. return functools.partial(_expiry_range_batched, batch_size=100) elif dialect == 'mysql': # We want somewhat more than 100, since Galera replication delay is # at least RTT*2. This can be a significant amount of time if # doing replication across a WAN. return functools.partial(_expiry_range_batched, batch_size=1000) return _expiry_range_all def flush_expired_tokens(self): with sql.session_for_write() as session: dialect = session.bind.dialect.name expiry_range_func = self._expiry_range_strategy(dialect) query = session.query(TokenModel.expires) total_removed = 0 upper_bound_func = timeutils.utcnow for expiry_time in expiry_range_func(session, upper_bound_func): delete_query = query.filter(TokenModel.expires <= expiry_time) row_count = delete_query.delete(synchronize_session=False) total_removed += row_count LOG.debug('Removed %d total expired tokens', total_removed) session.flush() LOG.info(_LI('Total expired tokens removed: %d'), total_removed) keystone-9.0.0/keystone/token/persistence/backends/memcache.py0000664000567000056710000000255412701407102025715 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud, Inc. # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import versionutils from keystone.token.persistence.backends import kvs CONF = cfg.CONF class Token(kvs.Token): kvs_backend = 'openstack.kvs.Memcached' memcached_backend = 'memcached' @versionutils.deprecated( what='Memcache Token Persistence Driver', as_of=versionutils.deprecated.MITAKA, in_favor_of='fernet token driver (no-persistence)', remove_in=0) def __init__(self, *args, **kwargs): kwargs['memcached_backend'] = self.memcached_backend kwargs['no_expiry_keys'] = [self.revocation_key] kwargs['memcached_expire_time'] = CONF.token.expiration kwargs['url'] = CONF.memcache.servers super(Token, self).__init__(*args, **kwargs) keystone-9.0.0/keystone/token/persistence/backends/kvs.py0000664000567000056710000003573212701407105024765 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud, Inc. # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import import copy from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six from keystone.common import kvs from keystone.common import utils from keystone import exception from keystone.i18n import _, _LE, _LW from keystone import token from keystone.token import provider CONF = cfg.CONF LOG = log.getLogger(__name__) class Token(token.persistence.TokenDriverV8): """KeyValueStore backend for tokens. This is the base implementation for any/all key-value-stores (e.g. 
memcached) for the Token backend. It is recommended to only use the base in-memory implementation for testing purposes. """ revocation_key = 'revocation-list' kvs_backend = 'openstack.kvs.Memory' def __init__(self, backing_store=None, **kwargs): super(Token, self).__init__() self._store = kvs.get_key_value_store('token-driver') if backing_store is not None: self.kvs_backend = backing_store if not self._store.is_configured: # Do not re-configure the backend if the store has been initialized self._store.configure(backing_store=self.kvs_backend, **kwargs) if self.__class__ == Token: # NOTE(morganfainberg): Only warn if the base KVS implementation # is instantiated. LOG.warning(_LW('It is recommended to only use the base ' 'key-value-store implementation for the token ' 'driver for testing purposes. Please use ' "'memcache' or 'sql' instead.")) def _prefix_token_id(self, token_id): return 'token-%s' % token_id.encode('utf-8') def _prefix_user_id(self, user_id): return 'usertokens-%s' % user_id.encode('utf-8') def _get_key_or_default(self, key, default=None): try: return self._store.get(key) except exception.NotFound: return default def _get_key(self, key): return self._store.get(key) def _set_key(self, key, value, lock=None): self._store.set(key, value, lock) def _delete_key(self, key): return self._store.delete(key) def get_token(self, token_id): ptk = self._prefix_token_id(token_id) try: token_ref = self._get_key(ptk) except exception.NotFound: raise exception.TokenNotFound(token_id=token_id) return token_ref def create_token(self, token_id, data): """Create a token by id and data. It is assumed the caller has performed data validation on the "data" parameter. 
""" data_copy = copy.deepcopy(data) ptk = self._prefix_token_id(token_id) if not data_copy.get('expires'): data_copy['expires'] = provider.default_expire_time() if not data_copy.get('user_id'): data_copy['user_id'] = data_copy['user']['id'] # NOTE(morganfainberg): for ease of manipulating the data without # concern about the backend, always store the value(s) in the # index as the isotime (string) version so this is where the string is # built. expires_str = utils.isotime(data_copy['expires'], subsecond=True) self._set_key(ptk, data_copy) user_id = data['user']['id'] user_key = self._prefix_user_id(user_id) self._update_user_token_list(user_key, token_id, expires_str) if CONF.trust.enabled and data.get('trust_id'): # NOTE(morganfainberg): If trusts are enabled and this is a trust # scoped token, we add the token to the trustee list as well. This # allows password changes of the trustee to also expire the token. # There is no harm in placing the token in multiple lists, as # _list_tokens is smart enough to handle almost any case of # valid/invalid/expired for a given token. token_data = data_copy['token_data'] if data_copy['token_version'] == token.provider.V2: trustee_user_id = token_data['access']['trust'][ 'trustee_user_id'] elif data_copy['token_version'] == token.provider.V3: trustee_user_id = token_data['OS-TRUST:trust'][ 'trustee_user_id'] else: raise exception.UnsupportedTokenVersionException( _('Unknown token version %s') % data_copy.get('token_version')) trustee_key = self._prefix_user_id(trustee_user_id) self._update_user_token_list(trustee_key, token_id, expires_str) return data_copy def _get_user_token_list_with_expiry(self, user_key): """Return user token list with token expiry. 
:return: the tuples in the format (token_id, token_expiry) :rtype: list """ return self._get_key_or_default(user_key, default=[]) def _get_user_token_list(self, user_key): """Return a list of token_ids for the user_key.""" token_list = self._get_user_token_list_with_expiry(user_key) # Each element is a tuple of (token_id, token_expiry). Most code does # not care about the expiry, it is stripped out and only a # list of token_ids are returned. return [t[0] for t in token_list] def _update_user_token_list(self, user_key, token_id, expires_isotime_str): current_time = self._get_current_time() revoked_token_list = set([t['id'] for t in self.list_revoked_tokens()]) with self._store.get_lock(user_key) as lock: filtered_list = [] token_list = self._get_user_token_list_with_expiry(user_key) for item in token_list: try: item_id, expires = self._format_token_index_item(item) except (ValueError, TypeError): # NOTE(morganfainberg): Skip on expected errors # possibilities from the `_format_token_index_item` method. continue if expires < current_time: LOG.debug(('Token `%(token_id)s` is expired, removing ' 'from `%(user_key)s`.'), {'token_id': item_id, 'user_key': user_key}) continue if item_id in revoked_token_list: # NOTE(morganfainberg): If the token has been revoked, it # can safely be removed from this list. This helps to keep # the user_token_list as reasonably small as possible. 
LOG.debug(('Token `%(token_id)s` is revoked, removing ' 'from `%(user_key)s`.'), {'token_id': item_id, 'user_key': user_key}) continue filtered_list.append(item) filtered_list.append((token_id, expires_isotime_str)) self._set_key(user_key, filtered_list, lock) return filtered_list def _get_current_time(self): return timeutils.normalize_time(timeutils.utcnow()) def _add_to_revocation_list(self, data, lock): filtered_list = [] revoked_token_data = {} current_time = self._get_current_time() expires = data['expires'] if isinstance(expires, six.string_types): expires = timeutils.parse_isotime(expires) expires = timeutils.normalize_time(expires) if expires < current_time: LOG.warning(_LW('Token `%s` is expired, not adding to the ' 'revocation list.'), data['id']) return revoked_token_data['expires'] = utils.isotime(expires, subsecond=True) revoked_token_data['id'] = data['id'] token_data = data['token_data'] if 'access' in token_data: # It's a v2 token. audit_ids = token_data['access']['token']['audit_ids'] else: # It's a v3 token. audit_ids = token_data['token']['audit_ids'] revoked_token_data['audit_id'] = audit_ids[0] token_list = self._get_key_or_default(self.revocation_key, default=[]) if not isinstance(token_list, list): # NOTE(morganfainberg): In the case that the revocation list is not # in a format we understand, reinitialize it. This is an attempt to # not allow the revocation list to be completely broken if # somehow the key is changed outside of keystone (e.g. memcache # that is shared by multiple applications). Logging occurs at error # level so that the cloud administrators have some awareness that # the revocation_list needed to be cleared out. In all, this should # be recoverable. Keystone cannot control external applications # from changing a key in some backends, however, it is possible to # gracefully handle and notify of this event. LOG.error(_LE('Reinitializing revocation list due to error ' 'in loading revocation list from backend. 
' 'Expected `list` type got `%(type)s`. Old ' 'revocation list data: %(list)r'), {'type': type(token_list), 'list': token_list}) token_list = [] # NOTE(morganfainberg): on revocation, cleanup the expired entries, try # to keep the list of tokens revoked at the minimum. for token_data in token_list: try: expires_at = timeutils.normalize_time( timeutils.parse_isotime(token_data['expires'])) except ValueError: LOG.warning(_LW('Removing `%s` from revocation list due to ' 'invalid expires data in revocation list.'), token_data.get('id', 'INVALID_TOKEN_DATA')) continue if expires_at > current_time: filtered_list.append(token_data) filtered_list.append(revoked_token_data) self._set_key(self.revocation_key, filtered_list, lock) def delete_token(self, token_id): # Test for existence with self._store.get_lock(self.revocation_key) as lock: data = self.get_token(token_id) ptk = self._prefix_token_id(token_id) result = self._delete_key(ptk) self._add_to_revocation_list(data, lock) return result def delete_tokens(self, user_id, tenant_id=None, trust_id=None, consumer_id=None): return super(Token, self).delete_tokens( user_id=user_id, tenant_id=tenant_id, trust_id=trust_id, consumer_id=consumer_id, ) def _format_token_index_item(self, item): try: token_id, expires = item except (TypeError, ValueError): LOG.debug(('Invalid token entry expected tuple of ' '`(, )` got: `%(item)r`'), dict(item=item)) raise try: expires = timeutils.normalize_time( timeutils.parse_isotime(expires)) except ValueError: LOG.debug(('Invalid expires time on token `%(token_id)s`:' ' %(expires)r'), dict(token_id=token_id, expires=expires)) raise return token_id, expires def _token_match_tenant(self, token_ref, tenant_id): if token_ref.get('tenant'): return token_ref['tenant'].get('id') == tenant_id return False def _token_match_trust(self, token_ref, trust_id): if not token_ref.get('trust_id'): return False return token_ref['trust_id'] == trust_id def _token_match_consumer(self, token_ref, consumer_id): try: 
oauth = token_ref['token_data']['token']['OS-OAUTH1'] return oauth.get('consumer_id') == consumer_id except KeyError: return False def _list_tokens(self, user_id, tenant_id=None, trust_id=None, consumer_id=None): # This function is used to generate the list of tokens that should be # revoked when revoking by token identifiers. This approach will be # deprecated soon, probably in the Juno release. Setting revoke_by_id # to False indicates that this kind of recording should not be # performed. In order to test the revocation events, tokens shouldn't # be deleted from the backends. This check ensures that tokens are # still recorded. if not CONF.token.revoke_by_id: return [] tokens = [] user_key = self._prefix_user_id(user_id) token_list = self._get_user_token_list_with_expiry(user_key) current_time = self._get_current_time() for item in token_list: try: token_id, expires = self._format_token_index_item(item) except (TypeError, ValueError): # NOTE(morganfainberg): Skip on expected error possibilities # from the `_format_token_index_item` method. continue if expires < current_time: continue try: token_ref = self.get_token(token_id) except exception.TokenNotFound: # NOTE(morganfainberg): Token doesn't exist, skip it. 
continue if token_ref: if tenant_id is not None: if not self._token_match_tenant(token_ref, tenant_id): continue if trust_id is not None: if not self._token_match_trust(token_ref, trust_id): continue if consumer_id is not None: if not self._token_match_consumer(token_ref, consumer_id): continue tokens.append(token_id) return tokens def list_revoked_tokens(self): revoked_token_list = self._get_key_or_default(self.revocation_key, default=[]) if isinstance(revoked_token_list, list): return revoked_token_list return [] def flush_expired_tokens(self): """Archive or delete tokens that have expired.""" raise exception.NotImplemented() keystone-9.0.0/keystone/token/persistence/__init__.py0000664000567000056710000000117212701407102024133 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.token.persistence.core import * # noqa __all__ = ('Manager', 'Driver') keystone-9.0.0/keystone/token/persistence/core.py0000664000567000056710000003255312701407102023333 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Main entry point into the Token Persistence service."""

import abc
import copy

from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six

from keystone.common import cache
from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone.i18n import _LW
from keystone.token import utils


CONF = cfg.CONF
LOG = log.getLogger(__name__)

MEMOIZE = cache.get_memoization_decorator(group='token')
REVOCATION_MEMOIZE = cache.get_memoization_decorator(
    group='token', expiration_group='revoke')


@dependency.requires('assignment_api', 'identity_api', 'resource_api',
                     'token_provider_api', 'trust_api')
class PersistenceManager(manager.Manager):
    """Default pivot point for the Token Persistence backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.
    """

    driver_namespace = 'keystone.token.persistence'

    def __init__(self):
        super(PersistenceManager, self).__init__(CONF.token.driver)

    def _assert_valid(self, token_id, token_ref):
        """Raise TokenNotFound if the token is expired."""
        current_time = timeutils.normalize_time(timeutils.utcnow())
        expires = token_ref.get('expires')
        if not expires or current_time > timeutils.normalize_time(expires):
            raise exception.TokenNotFound(token_id=token_id)

    def get_token(self, token_id):
        unique_id = utils.generate_unique_id(token_id)
        token_ref = self._get_token(unique_id)
        # NOTE(morganfainberg): Lift expired checking to the manager, there
        # is no reason to make the drivers implement this check. With
        # caching, self._get_token could return an expired token. Make sure
        # we behave as expected and raise TokenNotFound on those instances.
        self._assert_valid(token_id, token_ref)
        return token_ref

    @MEMOIZE
    def _get_token(self, token_id):
        # Only ever use the "unique" id in the cache key.
        return self.driver.get_token(token_id)

    def create_token(self, token_id, data):
        unique_id = utils.generate_unique_id(token_id)
        data_copy = copy.deepcopy(data)
        data_copy['id'] = unique_id
        ret = self.driver.create_token(unique_id, data_copy)
        if MEMOIZE.should_cache(ret):
            # NOTE(morganfainberg): when doing a cache set, you must pass the
            # same arguments through, the same as invalidate (this includes
            # "self"). First argument is always the value to be cached
            self._get_token.set(ret, self, unique_id)
        return ret

    def delete_token(self, token_id):
        if not CONF.token.revoke_by_id:
            return
        unique_id = utils.generate_unique_id(token_id)
        self.driver.delete_token(unique_id)
        self._invalidate_individual_token_cache(unique_id)
        self.invalidate_revocation_list()

    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                      consumer_id=None):
        if not CONF.token.revoke_by_id:
            return
        token_list = self.driver.delete_tokens(user_id, tenant_id, trust_id,
                                               consumer_id)
        for token_id in token_list:
            unique_id = utils.generate_unique_id(token_id)
            self._invalidate_individual_token_cache(unique_id)
        self.invalidate_revocation_list()

    @REVOCATION_MEMOIZE
    def list_revoked_tokens(self):
        return self.driver.list_revoked_tokens()

    def invalidate_revocation_list(self):
        # NOTE(morganfainberg): Note that ``self`` needs to be passed to
        # invalidate() because of the way the invalidation method works on
        # determining cache-keys.
        self.list_revoked_tokens.invalidate(self)

    def delete_tokens_for_domain(self, domain_id):
        """Delete all tokens for a given domain.

        It will delete all the project-scoped tokens for the projects
        that are owned by the given domain, as well as any tokens issued
        to users that are owned by this domain.

        However, deletion of domain_scoped tokens will still need to be
        implemented as stated in TODO below.
        """
        if not CONF.token.revoke_by_id:
            return
        projects = self.resource_api.list_projects()
        for project in projects:
            if project['domain_id'] == domain_id:
                for user_id in self.assignment_api.list_user_ids_for_project(
                        project['id']):
                    self.delete_tokens_for_user(user_id, project['id'])
        # TODO(morganfainberg): implement deletion of domain_scoped tokens.

        users = self.identity_api.list_users(domain_id)
        user_ids = (user['id'] for user in users)
        self.delete_tokens_for_users(user_ids)

    def delete_tokens_for_user(self, user_id, project_id=None):
        """Delete all tokens for a given user or user-project combination.

        This method adds in the extra logic for handling trust-scoped token
        revocations in a single call instead of needing to explicitly handle
        trusts in the caller's logic.
        """
        if not CONF.token.revoke_by_id:
            return
        self.delete_tokens(user_id, tenant_id=project_id)
        for trust in self.trust_api.list_trusts_for_trustee(user_id):
            # Ensure we revoke tokens associated to the trust / project
            # user_id combination.
            self.delete_tokens(user_id, trust_id=trust['id'],
                               tenant_id=project_id)
        for trust in self.trust_api.list_trusts_for_trustor(user_id):
            # Ensure we revoke tokens associated to the trust / project /
            # user_id combination where the user_id is the trustor.

            # NOTE(morganfainberg): This revocation is a bit coarse, but it
            # covers a number of cases such as disabling of the trustor user,
            # deletion of the trustor user (for any number of reasons). It
            # might make sense to refine this and be more surgical on the
            # deletions (e.g. don't revoke tokens for the trusts when the
            # trustor changes password). For now, to maintain previous
            # functionality, this will continue to be a bit overzealous on
            # revocations.
            self.delete_tokens(trust['trustee_user_id'],
                               trust_id=trust['id'],
                               tenant_id=project_id)

    def delete_tokens_for_users(self, user_ids, project_id=None):
        """Delete all tokens for a list of user_ids.

        :param user_ids: list of user identifiers
        :param project_id: optional project identifier
        """
        if not CONF.token.revoke_by_id:
            return
        for user_id in user_ids:
            self.delete_tokens_for_user(user_id, project_id=project_id)

    def _invalidate_individual_token_cache(self, token_id):
        # NOTE(morganfainberg): invalidate takes the exact same arguments as
        # the normal method, this means we need to pass "self" in (which gets
        # stripped off).

        # FIXME(morganfainberg): Does this cache actually need to be
        # invalidated? We maintain a cached revocation list, which should be
        # consulted before accepting a token as valid. For now we will
        # do the explicit individual token invalidation.
        self._get_token.invalidate(self, token_id)
        self.token_provider_api.invalidate_individual_token_cache(token_id)


@dependency.requires('token_provider_api')
@dependency.provider('token_api')
class Manager(object):
    """The token_api provider.

    This class is a proxy class to the token_provider_api's persistence
    manager.
    """

    def __init__(self):
        # NOTE(morganfainberg): __init__ is required for dependency
        # processing.
        super(Manager, self).__init__()

    def __getattr__(self, item):
        """Forward calls to the `token_provider_api` persistence manager."""
        # NOTE(morganfainberg): Prevent infinite recursion, raise an
        # AttributeError for 'token_provider_api' ensuring that the dep
        # injection doesn't infinitely try and lookup
        # self.token_provider_api on _process_dependencies. This doesn't
        # need an exception string as it should only ever be hit on
        # instantiation.
        if item == 'token_provider_api':
            raise AttributeError()

        f = getattr(self.token_provider_api._persistence, item)
        LOG.warning(_LW('`token_api.%s` is deprecated as of Juno in favor of '
                        'utilizing methods on `token_provider_api` and may be '
                        'removed in Kilo.'), item)
        setattr(self, item, f)
        return f


@six.add_metaclass(abc.ABCMeta)
class TokenDriverV8(object):
    """Interface description for a Token driver."""

    @abc.abstractmethod
    def get_token(self, token_id):
        """Get a token by id.

        :param token_id: identity of the token
        :type token_id: string
        :returns: token_ref
        :raises keystone.exception.TokenNotFound: If the token doesn't exist.
        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def create_token(self, token_id, data):
        """Create a token by id and data.

        :param token_id: identity of the token
        :type token_id: string
        :param data: dictionary with additional reference information

        ::

            {
                expires=''
                id=token_id,
                user=user_ref,
                tenant=tenant_ref,
                metadata=metadata_ref
            }

        :type data: dict
        :returns: token_ref or None.
        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_token(self, token_id):
        """Deletes a token by id.

        :param token_id: identity of the token
        :type token_id: string
        :returns: None.
        :raises keystone.exception.TokenNotFound: If the token doesn't exist.
        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                      consumer_id=None):
        """Deletes tokens by user.

        If the tenant_id is not None, only delete the tokens by user id under
        the specified tenant.

        If the trust_id is not None, it will be used to query tokens and the
        user_id will be ignored.

        If the consumer_id is not None, only delete the tokens by consumer id
        that match the specified consumer id.

        :param user_id: identity of user
        :type user_id: string
        :param tenant_id: identity of the tenant
        :type tenant_id: string
        :param trust_id: identity of the trust
        :type trust_id: string
        :param consumer_id: identity of the consumer
        :type consumer_id: string
        :returns: The tokens that have been deleted.
        :raises keystone.exception.TokenNotFound: If the token doesn't exist.
        """
        if not CONF.token.revoke_by_id:
            return
        token_list = self._list_tokens(user_id,
                                       tenant_id=tenant_id,
                                       trust_id=trust_id,
                                       consumer_id=consumer_id)
        for token in token_list:
            try:
                self.delete_token(token)
            except exception.NotFound:  # nosec
                # The token is already gone, good.
                pass
        return token_list

    @abc.abstractmethod
    def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
                     consumer_id=None):
        """Returns a list of current token_id's for a user

        This is effectively a private method only used by the
        ``delete_tokens`` method and should not be called by anything outside
        of the ``token_api`` manager or the token driver itself.

        :param user_id: identity of the user
        :type user_id: string
        :param tenant_id: identity of the tenant
        :type tenant_id: string
        :param trust_id: identity of the trust
        :type trust_id: string
        :param consumer_id: identity of the consumer
        :type consumer_id: string
        :returns: list of token_id's
        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_revoked_tokens(self):
        """Returns a list of all revoked tokens

        :returns: list of token_id's
        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def flush_expired_tokens(self):
        """Archive or delete tokens that have expired."""
        raise exception.NotImplemented()  # pragma: no cover


Driver = manager.create_legacy_driver(TokenDriverV8)


# =========================================================================
# keystone-9.0.0/keystone/token/controllers.py
# =========================================================================
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import sys from keystone.common import utils from keystoneclient.common import cms from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import timeutils import six from keystone.common import controller from keystone.common import dependency from keystone.common import wsgi from keystone import exception from keystone.i18n import _ from keystone.models import token_model from keystone.token import provider CONF = cfg.CONF LOG = log.getLogger(__name__) class ExternalAuthNotApplicable(Exception): """External authentication is not applicable.""" pass @dependency.requires('assignment_api', 'catalog_api', 'identity_api', 'resource_api', 'role_api', 'token_provider_api', 'trust_api') class Auth(controller.V2Controller): @controller.v2_deprecated def ca_cert(self, context, auth=None): with open(CONF.signing.ca_certs, 'r') as ca_file: data = ca_file.read() return data @controller.v2_deprecated def signing_cert(self, context, auth=None): with open(CONF.signing.certfile, 'r') as cert_file: data = cert_file.read() return data @controller.v2_auth_deprecated def authenticate(self, context, auth=None): """Authenticate credentials and return a token. Accept auth as a dict that looks like:: { "auth":{ "passwordCredentials":{ "username":"test_user", "password":"mypass" }, "tenantName":"customer-x" } } In this case, tenant is optional, if not provided the token will be considered "unscoped" and can later be used to get a scoped token. 
Alternatively, this call accepts auth with only a token and tenant that will return a token that is scoped to that tenant. """ if auth is None: raise exception.ValidationError(attribute='auth', target='request body') if "token" in auth: # Try to authenticate using a token auth_info = self._authenticate_token( context, auth) else: # Try external authentication try: auth_info = self._authenticate_external( context, auth) except ExternalAuthNotApplicable: # Try local authentication auth_info = self._authenticate_local( context, auth) user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id = auth_info # Validate that the auth info is valid and nothing is disabled try: self.identity_api.assert_user_enabled( user_id=user_ref['id'], user=user_ref) if tenant_ref: self.resource_api.assert_project_enabled( project_id=tenant_ref['id'], project=tenant_ref) except AssertionError as e: six.reraise(exception.Unauthorized, exception.Unauthorized(e), sys.exc_info()[2]) # NOTE(morganfainberg): Make sure the data is in correct form since it # might be consumed external to Keystone and this is a v2.0 controller. # The user_ref is encoded into the auth_token_data which is returned as # part of the token data. The token provider doesn't care about the # format. 
user_ref = self.v3_to_v2_user(user_ref) if tenant_ref: tenant_ref = self.v3_to_v2_project(tenant_ref) auth_token_data = self._get_auth_token_data(user_ref, tenant_ref, metadata_ref, expiry, audit_id) if tenant_ref: catalog_ref = self.catalog_api.get_catalog( user_ref['id'], tenant_ref['id']) else: catalog_ref = {} auth_token_data['id'] = 'placeholder' if bind: auth_token_data['bind'] = bind roles_ref = [] for role_id in metadata_ref.get('roles', []): role_ref = self.role_api.get_role(role_id) roles_ref.append(dict(name=role_ref['name'])) (token_id, token_data) = self.token_provider_api.issue_v2_token( auth_token_data, roles_ref=roles_ref, catalog_ref=catalog_ref) # NOTE(wanghong): We consume a trust use only when we are using trusts # and have successfully issued a token. if CONF.trust.enabled and 'trust_id' in auth: self.trust_api.consume_use(auth['trust_id']) return token_data def _restrict_scope(self, token_model_ref): # A trust token cannot be used to get another token if token_model_ref.trust_scoped: raise exception.Forbidden() if not CONF.token.allow_rescope_scoped_token: # Do not allow conversion from scoped tokens. if token_model_ref.project_scoped or token_model_ref.domain_scoped: raise exception.Forbidden(action=_("rescope a scoped token")) def _authenticate_token(self, context, auth): """Try to authenticate using an already existing token. 
Returns auth_token_data, (user_ref, tenant_ref, metadata_ref) """ if 'token' not in auth: raise exception.ValidationError( attribute='token', target='auth') if "id" not in auth['token']: raise exception.ValidationError( attribute="id", target="token") old_token = auth['token']['id'] if len(old_token) > CONF.max_token_size: raise exception.ValidationSizeError(attribute='token', size=CONF.max_token_size) try: token_model_ref = token_model.KeystoneToken( token_id=old_token, token_data=self.token_provider_api.validate_token(old_token)) except exception.NotFound as e: raise exception.Unauthorized(e) wsgi.validate_token_bind(context, token_model_ref) self._restrict_scope(token_model_ref) user_id = token_model_ref.user_id tenant_id = self._get_project_id_from_auth(auth) if not CONF.trust.enabled and 'trust_id' in auth: raise exception.Forbidden('Trusts are disabled.') elif CONF.trust.enabled and 'trust_id' in auth: try: trust_ref = self.trust_api.get_trust(auth['trust_id']) except exception.TrustNotFound: raise exception.Forbidden() if user_id != trust_ref['trustee_user_id']: raise exception.Forbidden() if (trust_ref['project_id'] and tenant_id != trust_ref['project_id']): raise exception.Forbidden() if ('expires' in trust_ref) and (trust_ref['expires']): expiry = trust_ref['expires'] if expiry < timeutils.parse_isotime(utils.isotime()): raise exception.Forbidden() user_id = trust_ref['trustor_user_id'] trustor_user_ref = self.identity_api.get_user( trust_ref['trustor_user_id']) if not trustor_user_ref['enabled']: raise exception.Forbidden() trustee_user_ref = self.identity_api.get_user( trust_ref['trustee_user_id']) if not trustee_user_ref['enabled']: raise exception.Forbidden() if trust_ref['impersonation'] is True: current_user_ref = trustor_user_ref else: current_user_ref = trustee_user_ref else: current_user_ref = self.identity_api.get_user(user_id) metadata_ref = {} tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref( user_id, tenant_id) expiry = 
token_model_ref.expires if CONF.trust.enabled and 'trust_id' in auth: trust_id = auth['trust_id'] trust_roles = [] for role in trust_ref['roles']: if 'roles' not in metadata_ref: raise exception.Forbidden() if role['id'] in metadata_ref['roles']: trust_roles.append(role['id']) else: raise exception.Forbidden() if 'expiry' in trust_ref and trust_ref['expiry']: trust_expiry = timeutils.parse_isotime(trust_ref['expiry']) if trust_expiry < expiry: expiry = trust_expiry metadata_ref['roles'] = trust_roles metadata_ref['trustee_user_id'] = trust_ref['trustee_user_id'] metadata_ref['trust_id'] = trust_id bind = token_model_ref.bind audit_id = token_model_ref.audit_chain_id return (current_user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id) def _authenticate_local(self, context, auth): """Try to authenticate against the identity backend. Returns auth_token_data, (user_ref, tenant_ref, metadata_ref) """ if 'passwordCredentials' not in auth: raise exception.ValidationError( attribute='passwordCredentials', target='auth') if "password" not in auth['passwordCredentials']: raise exception.ValidationError( attribute='password', target='passwordCredentials') password = auth['passwordCredentials']['password'] if password and len(password) > CONF.identity.max_password_length: raise exception.ValidationSizeError( attribute='password', size=CONF.identity.max_password_length) if (not auth['passwordCredentials'].get("userId") and not auth['passwordCredentials'].get("username")): raise exception.ValidationError( attribute='username or userId', target='passwordCredentials') user_id = auth['passwordCredentials'].get('userId') if user_id and len(user_id) > CONF.max_param_size: raise exception.ValidationSizeError(attribute='userId', size=CONF.max_param_size) username = auth['passwordCredentials'].get('username', '') if username: if len(username) > CONF.max_param_size: raise exception.ValidationSizeError(attribute='username', size=CONF.max_param_size) try: user_ref = 
self.identity_api.get_user_by_name( username, CONF.identity.default_domain_id) user_id = user_ref['id'] except exception.UserNotFound as e: raise exception.Unauthorized(e) try: user_ref = self.identity_api.authenticate( context, user_id=user_id, password=password) except AssertionError as e: raise exception.Unauthorized(e.args[0]) metadata_ref = {} tenant_id = self._get_project_id_from_auth(auth) tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref( user_id, tenant_id) expiry = provider.default_expire_time() bind = None audit_id = None return (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id) def _authenticate_external(self, context, auth): """Try to authenticate an external user via REMOTE_USER variable. Returns auth_token_data, (user_ref, tenant_ref, metadata_ref) """ environment = context.get('environment', {}) if not environment.get('REMOTE_USER'): raise ExternalAuthNotApplicable() username = environment['REMOTE_USER'] try: user_ref = self.identity_api.get_user_by_name( username, CONF.identity.default_domain_id) user_id = user_ref['id'] except exception.UserNotFound as e: raise exception.Unauthorized(e) metadata_ref = {} tenant_id = self._get_project_id_from_auth(auth) tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref( user_id, tenant_id) expiry = provider.default_expire_time() bind = None if ('kerberos' in CONF.token.bind and environment.get('AUTH_TYPE', '').lower() == 'negotiate'): bind = {'kerberos': username} audit_id = None return (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id) def _get_auth_token_data(self, user, tenant, metadata, expiry, audit_id): return dict(user=user, tenant=tenant, metadata=metadata, expires=expiry, parent_audit_id=audit_id) def _get_project_id_from_auth(self, auth): """Extract tenant information from auth dict. Returns a valid tenant_id if it exists, or None if not specified. 
""" tenant_id = auth.get('tenantId') if tenant_id and len(tenant_id) > CONF.max_param_size: raise exception.ValidationSizeError(attribute='tenantId', size=CONF.max_param_size) tenant_name = auth.get('tenantName') if tenant_name and len(tenant_name) > CONF.max_param_size: raise exception.ValidationSizeError(attribute='tenantName', size=CONF.max_param_size) if tenant_name: if (CONF.resource.project_name_url_safe == 'strict' and utils.is_not_url_safe(tenant_name)): msg = _('Tenant name cannot contain reserved characters.') raise exception.Unauthorized(message=msg) try: tenant_ref = self.resource_api.get_project_by_name( tenant_name, CONF.identity.default_domain_id) tenant_id = tenant_ref['id'] except exception.ProjectNotFound as e: raise exception.Unauthorized(e) return tenant_id def _get_project_roles_and_ref(self, user_id, tenant_id): """Returns the project roles for this user, and the project ref.""" tenant_ref = None role_list = [] if tenant_id: try: tenant_ref = self.resource_api.get_project(tenant_id) role_list = self.assignment_api.get_roles_for_user_and_project( user_id, tenant_id) except exception.ProjectNotFound: msg = _('Project ID not found: %(t_id)s') % {'t_id': tenant_id} raise exception.Unauthorized(msg) if not role_list: msg = _('User %(u_id)s is unauthorized for tenant %(t_id)s') msg = msg % {'u_id': user_id, 't_id': tenant_id} LOG.warning(msg) raise exception.Unauthorized(msg) return (tenant_ref, role_list) def _get_token_ref(self, token_id, belongs_to=None): """Returns a token if a valid one exists. Optionally, limited to a token owned by a specific tenant. 
""" token_ref = token_model.KeystoneToken( token_id=token_id, token_data=self.token_provider_api.validate_token(token_id)) if belongs_to: if not token_ref.project_scoped: raise exception.Unauthorized( _('Token does not belong to specified tenant.')) if token_ref.project_id != belongs_to: raise exception.Unauthorized( _('Token does not belong to specified tenant.')) return token_ref @controller.v2_deprecated @controller.protected() def validate_token_head(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Identical to ``validate_token``, except does not return a response. The code in ``keystone.common.wsgi.render_response`` will remove the content body. """ belongs_to = context['query_string'].get('belongsTo') return self.token_provider_api.validate_v2_token(token_id, belongs_to) @controller.v2_deprecated @controller.protected() def validate_token(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Returns metadata about the token along any associated roles. 
""" belongs_to = context['query_string'].get('belongsTo') # TODO(ayoung) validate against revocation API return self.token_provider_api.validate_v2_token(token_id, belongs_to) @controller.v2_deprecated def delete_token(self, context, token_id): """Delete a token, effectively invalidating it for authz.""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) self.token_provider_api.revoke_token(token_id) @controller.v2_deprecated @controller.protected() def revocation_list(self, context, auth=None): if not CONF.token.revoke_by_id: raise exception.Gone() tokens = self.token_provider_api.list_revoked_tokens() for t in tokens: expires = t['expires'] if expires and isinstance(expires, datetime.datetime): t['expires'] = utils.isotime(expires) data = {'revoked': tokens} json_data = jsonutils.dumps(data) signed_text = cms.cms_sign_text(json_data, CONF.signing.certfile, CONF.signing.keyfile) return {'signed': signed_text} @controller.v2_deprecated def endpoints(self, context, token_id): """Return a list of endpoints available to the token.""" self.assert_admin(context) token_ref = self._get_token_ref(token_id) catalog_ref = None if token_ref.project_id: catalog_ref = self.catalog_api.get_catalog( token_ref.user_id, token_ref.project_id) return Auth.format_endpoint_list(catalog_ref) @classmethod def format_endpoint_list(cls, catalog_ref): """Formats a list of endpoints according to Identity API v2. 
The v2.0 API wants an endpoint list to look like:: { 'endpoints': [ { 'id': $endpoint_id, 'name': $SERVICE[name], 'type': $SERVICE, 'tenantId': $tenant_id, 'region': $REGION, } ], 'endpoints_links': [], } """ if not catalog_ref: return {} endpoints = [] for region_name, region_ref in catalog_ref.items(): for service_type, service_ref in region_ref.items(): endpoints.append({ 'id': service_ref.get('id'), 'name': service_ref.get('name'), 'type': service_type, 'region': region_name, 'publicURL': service_ref.get('publicURL'), 'internalURL': service_ref.get('internalURL'), 'adminURL': service_ref.get('adminURL'), }) return {'endpoints': endpoints, 'endpoints_links': []} keystone-9.0.0/keystone/token/routers.py0000664000567000056710000000522512701407102021556 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class Router(wsgi.ComposableRouter):
    def add_routes(self, mapper):
        """Wire up the v2 token API routes on the given mapper."""
        token_controller = controllers.Auth()

        # (url, controller action, HTTP method) for every v2 token route.
        routes = [
            ('/tokens', 'authenticate', 'POST'),
            ('/tokens/revoked', 'revocation_list', 'GET'),
            ('/tokens/{token_id}', 'validate_token', 'GET'),
            # NOTE(morganfainberg): For policy enforcement reasons, the
            # ``validate_token_head`` method is still used for HEAD requests.
            # The controller method makes the same call as the validate_token
            # call and lets wsgi.render_response remove the body data.
            ('/tokens/{token_id}', 'validate_token_head', 'HEAD'),
            ('/tokens/{token_id}', 'delete_token', 'DELETE'),
            ('/tokens/{token_id}/endpoints', 'endpoints', 'GET'),
            # Certificates used to verify auth tokens
            ('/certificates/ca', 'ca_cert', 'GET'),
            ('/certificates/signing', 'signing_cert', 'GET'),
        ]
        for url, action, method in routes:
            mapper.connect(url,
                           controller=token_controller,
                           action=action,
                           conditions=dict(method=[method]))
def base64_encode(s):
    """Encode a URL-safe string.

    The trailing base64 padding (``=``) is stripped from the result.

    :type s: six.text_type
    :rtype: six.text_type
    """
    # base64.urlsafe_b64encode() operates on bytes. The original code passed
    # text straight through, which raises TypeError on Python 3 even though
    # the documented input type is six.text_type — encode text to UTF-8
    # first. Byte input (the Python 2 str case) is passed through unchanged.
    if not isinstance(s, bytes):
        s = s.encode('utf-8')
    # urlsafe_b64encode() returns six.binary_type so need to convert to
    # six.text_type, might as well do it before stripping.
    return base64.urlsafe_b64encode(s).decode('utf-8').rstrip('=')
def random_urlsafe_str_to_bytes(s):
    """Convert a string from :func:`random_urlsafe_str()` to six.binary_type.

    :type s: six.text_type
    :rtype: six.binary_type
    """
    # urlsafe_b64decode() requires str; coerce the input and re-attach the
    # two padding characters (==) that random_urlsafe_str() chopped off.
    padded = '%s==' % str(s)
    return base64.urlsafe_b64decode(padded)
""" driver_namespace = 'keystone.token.provider' V2 = V2 V3 = V3 VERSIONS = VERSIONS INVALIDATE_PROJECT_TOKEN_PERSISTENCE = 'invalidate_project_tokens' INVALIDATE_USER_TOKEN_PERSISTENCE = 'invalidate_user_tokens' _persistence_manager = None def __init__(self): super(Manager, self).__init__(CONF.token.provider) self._register_callback_listeners() def _register_callback_listeners(self): # This is used by the @dependency.provider decorator to register the # provider (token_provider_api) manager to listen for trust deletions. callbacks = { notifications.ACTIONS.deleted: [ ['OS-TRUST:trust', self._trust_deleted_event_callback], ['user', self._delete_user_tokens_callback], ['domain', self._delete_domain_tokens_callback], ], notifications.ACTIONS.disabled: [ ['user', self._delete_user_tokens_callback], ['domain', self._delete_domain_tokens_callback], ['project', self._delete_project_tokens_callback], ], notifications.ACTIONS.internal: [ [notifications.INVALIDATE_USER_TOKEN_PERSISTENCE, self._delete_user_tokens_callback], [notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE, self._delete_user_project_tokens_callback], [notifications.INVALIDATE_USER_OAUTH_CONSUMER_TOKENS, self._delete_user_oauth_consumer_tokens_callback], ] } for event, cb_info in callbacks.items(): for resource_type, callback_fns in cb_info: notifications.register_event_callback(event, resource_type, callback_fns) @property def _needs_persistence(self): return self.driver.needs_persistence() @property def _persistence(self): # NOTE(morganfainberg): This should not be handled via __init__ to # avoid dependency injection oddities circular dependencies (where # the provider manager requires the token persistence manager, which # requires the token provider manager). 
if self._persistence_manager is None: self._persistence_manager = persistence.PersistenceManager() return self._persistence_manager def _create_token(self, token_id, token_data): try: if isinstance(token_data['expires'], six.string_types): token_data['expires'] = timeutils.normalize_time( timeutils.parse_isotime(token_data['expires'])) self._persistence.create_token(token_id, token_data) except Exception: exc_info = sys.exc_info() # an identical token may have been created already. # if so, return the token_data as it is also identical try: self._persistence.get_token(token_id) except exception.TokenNotFound: six.reraise(*exc_info) def validate_token(self, token_id, belongs_to=None): unique_id = utils.generate_unique_id(token_id) # NOTE(morganfainberg): Ensure we never use the long-form token_id # (PKI) as part of the cache_key. token = self._validate_token(unique_id) self._token_belongs_to(token, belongs_to) self._is_valid_token(token) return token def check_revocation_v2(self, token): try: token_data = token['access'] except KeyError: raise exception.TokenNotFound(_('Failed to validate token')) token_values = self.revoke_api.model.build_token_values_v2( token_data, CONF.identity.default_domain_id) self.revoke_api.check_token(token_values) def validate_v2_token(self, token_id, belongs_to=None): # NOTE(lbragstad): Only go to the persistence backend if the token # provider requires it. if self._needs_persistence: # NOTE(morganfainberg): Ensure we never use the long-form token_id # (PKI) as part of the cache_key. unique_id = utils.generate_unique_id(token_id) token_ref = self._persistence.get_token(unique_id) token = self._validate_v2_token(token_ref) else: # NOTE(lbragstad): If the token doesn't require persistence, then # it is a fernet token. The fernet token provider doesn't care if # it's creating version 2.0 tokens or v3 tokens, so we use the same # validate_non_persistent_token() method to validate both. 
Then we # can leverage a separate method to make version 3 token data look # like version 2.0 token data. The pattern we want to move towards # is one where the token providers just handle data and the # controller layers handle interpreting the token data in a format # that makes sense for the request. v3_token_ref = self.validate_non_persistent_token(token_id) v2_token_data_helper = providers.common.V2TokenDataHelper() token = v2_token_data_helper.v3_to_v2_token(v3_token_ref) # these are common things that happen regardless of token provider token['access']['token']['id'] = token_id self._token_belongs_to(token, belongs_to) self._is_valid_token(token) return token def check_revocation_v3(self, token): try: token_data = token['token'] except KeyError: raise exception.TokenNotFound(_('Failed to validate token')) token_values = self.revoke_api.model.build_token_values(token_data) self.revoke_api.check_token(token_values) def check_revocation(self, token): version = self.get_token_version(token) if version == V2: return self.check_revocation_v2(token) else: return self.check_revocation_v3(token) def validate_v3_token(self, token_id): if not token_id: raise exception.TokenNotFound(_('No token in the request')) try: # NOTE(lbragstad): Only go to persistent storage if we have a token # to fetch from the backend (the driver persists the token). # Otherwise the information about the token must be in the token # id. if not self._needs_persistence: token_ref = self.validate_non_persistent_token(token_id) else: unique_id = utils.generate_unique_id(token_id) # NOTE(morganfainberg): Ensure we never use the long-form # token_id (PKI) as part of the cache_key. 
token_ref = self._persistence.get_token(unique_id) token_ref = self._validate_v3_token(token_ref) self._is_valid_token(token_ref) return token_ref except exception.Unauthorized as e: LOG.debug('Unable to validate token: %s', e) raise exception.TokenNotFound(token_id=token_id) @MEMOIZE def _validate_token(self, token_id): if not token_id: raise exception.TokenNotFound(_('No token in the request')) if not self._needs_persistence: # NOTE(lbragstad): This will validate v2 and v3 non-persistent # tokens. return self.driver.validate_non_persistent_token(token_id) token_ref = self._persistence.get_token(token_id) version = self.get_token_version(token_ref) if version == self.V3: try: return self.driver.validate_v3_token(token_ref) except exception.Unauthorized as e: LOG.debug('Unable to validate token: %s', e) raise exception.TokenNotFound(token_id=token_id) elif version == self.V2: return self.driver.validate_v2_token(token_ref) raise exception.UnsupportedTokenVersionException() @MEMOIZE def _validate_v2_token(self, token_id): return self.driver.validate_v2_token(token_id) @MEMOIZE def _validate_v3_token(self, token_id): return self.driver.validate_v3_token(token_id) def _is_valid_token(self, token): """Verify the token is valid format and has not expired.""" current_time = timeutils.normalize_time(timeutils.utcnow()) try: # Get the data we need from the correct location (V2 and V3 tokens # differ in structure, Try V3 first, fall back to V2 second) token_data = token.get('token', token.get('access')) expires_at = token_data.get('expires_at', token_data.get('expires')) if not expires_at: expires_at = token_data['token']['expires'] expiry = timeutils.normalize_time( timeutils.parse_isotime(expires_at)) except Exception: LOG.exception(_LE('Unexpected error or malformed token ' 'determining token expiry: %s'), token) raise exception.TokenNotFound(_('Failed to validate token')) if current_time < expiry: self.check_revocation(token) # Token has not expired and has not been 
revoked. return None else: raise exception.TokenNotFound(_('Failed to validate token')) def _token_belongs_to(self, token, belongs_to): """Check if the token belongs to the right tenant. This is only used on v2 tokens. The structural validity of the token will have already been checked before this method is called. """ if belongs_to: token_data = token['access']['token'] if ('tenant' not in token_data or token_data['tenant']['id'] != belongs_to): raise exception.Unauthorized() def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None): token_id, token_data = self.driver.issue_v2_token( token_ref, roles_ref, catalog_ref) if self._needs_persistence: data = dict(key=token_id, id=token_id, expires=token_data['access']['token']['expires'], user=token_ref['user'], tenant=token_ref['tenant'], metadata=token_ref['metadata'], token_data=token_data, bind=token_ref.get('bind'), trust_id=token_ref['metadata'].get('trust_id'), token_version=self.V2) self._create_token(token_id, data) return token_id, token_data def issue_v3_token(self, user_id, method_names, expires_at=None, project_id=None, domain_id=None, auth_context=None, trust=None, metadata_ref=None, include_catalog=True, parent_audit_id=None): token_id, token_data = self.driver.issue_v3_token( user_id, method_names, expires_at, project_id, domain_id, auth_context, trust, metadata_ref, include_catalog, parent_audit_id) if metadata_ref is None: metadata_ref = {} if 'project' in token_data['token']: # project-scoped token, fill in the v2 token data # all we care are the role IDs # FIXME(gyee): is there really a need to store roles in metadata? 
role_ids = [r['id'] for r in token_data['token']['roles']] metadata_ref = {'roles': role_ids} if trust: metadata_ref.setdefault('trust_id', trust['id']) metadata_ref.setdefault('trustee_user_id', trust['trustee_user_id']) data = dict(key=token_id, id=token_id, expires=token_data['token']['expires_at'], user=token_data['token']['user'], tenant=token_data['token'].get('project'), metadata=metadata_ref, token_data=token_data, trust_id=trust['id'] if trust else None, token_version=self.V3) if self._needs_persistence: self._create_token(token_id, data) return token_id, token_data def invalidate_individual_token_cache(self, token_id): # NOTE(morganfainberg): invalidate takes the exact same arguments as # the normal method, this means we need to pass "self" in (which gets # stripped off). # FIXME(morganfainberg): Does this cache actually need to be # invalidated? We maintain a cached revocation list, which should be # consulted before accepting a token as valid. For now we will # do the explicit individual token invalidation. 
self._validate_token.invalidate(self, token_id) self._validate_v2_token.invalidate(self, token_id) self._validate_v3_token.invalidate(self, token_id) def revoke_token(self, token_id, revoke_chain=False): revoke_by_expires = False project_id = None domain_id = None token_ref = token_model.KeystoneToken( token_id=token_id, token_data=self.validate_token(token_id)) user_id = token_ref.user_id expires_at = token_ref.expires audit_id = token_ref.audit_id audit_chain_id = token_ref.audit_chain_id if token_ref.project_scoped: project_id = token_ref.project_id if token_ref.domain_scoped: domain_id = token_ref.domain_id if audit_id is None and not revoke_chain: LOG.debug('Received token with no audit_id.') revoke_by_expires = True if audit_chain_id is None and revoke_chain: LOG.debug('Received token with no audit_chain_id.') revoke_by_expires = True if revoke_by_expires: self.revoke_api.revoke_by_expiration(user_id, expires_at, project_id=project_id, domain_id=domain_id) elif revoke_chain: self.revoke_api.revoke_by_audit_chain_id(audit_chain_id, project_id=project_id, domain_id=domain_id) else: self.revoke_api.revoke_by_audit_id(audit_id) if CONF.token.revoke_by_id and self._needs_persistence: self._persistence.delete_token(token_id=token_id) def list_revoked_tokens(self): return self._persistence.list_revoked_tokens() def _trust_deleted_event_callback(self, service, resource_type, operation, payload): if CONF.token.revoke_by_id: trust_id = payload['resource_info'] trust = self.trust_api.get_trust(trust_id, deleted=True) self._persistence.delete_tokens(user_id=trust['trustor_user_id'], trust_id=trust_id) def _delete_user_tokens_callback(self, service, resource_type, operation, payload): if CONF.token.revoke_by_id: user_id = payload['resource_info'] self._persistence.delete_tokens_for_user(user_id) def _delete_domain_tokens_callback(self, service, resource_type, operation, payload): if CONF.token.revoke_by_id: domain_id = payload['resource_info'] 
self._persistence.delete_tokens_for_domain(domain_id=domain_id) def _delete_user_project_tokens_callback(self, service, resource_type, operation, payload): if CONF.token.revoke_by_id: user_id = payload['resource_info']['user_id'] project_id = payload['resource_info']['project_id'] self._persistence.delete_tokens_for_user(user_id=user_id, project_id=project_id) def _delete_project_tokens_callback(self, service, resource_type, operation, payload): if CONF.token.revoke_by_id: project_id = payload['resource_info'] self._persistence.delete_tokens_for_users( self.assignment_api.list_user_ids_for_project(project_id), project_id=project_id) def _delete_user_oauth_consumer_tokens_callback(self, service, resource_type, operation, payload): if CONF.token.revoke_by_id: user_id = payload['resource_info']['user_id'] consumer_id = payload['resource_info']['consumer_id'] self._persistence.delete_tokens(user_id=user_id, consumer_id=consumer_id) @six.add_metaclass(abc.ABCMeta) class Provider(object): """Interface description for a Token provider.""" @abc.abstractmethod def needs_persistence(self): """Determine if the token should be persisted. If the token provider requires that the token be persisted to a backend this should return True, otherwise return False. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_token_version(self, token_data): """Return the version of the given token data. If the given token data is unrecognizable, UnsupportedTokenVersionException is raised. :param token_data: token_data :type token_data: dict :returns: token version string :raises keystone.exception.UnsupportedTokenVersionException: If the token version is not expected. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None): """Issue a V2 token. 
:param token_ref: token data to generate token from :type token_ref: dict :param roles_ref: optional roles list :type roles_ref: dict :param catalog_ref: optional catalog information :type catalog_ref: dict :returns: (token_id, token_data) """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def issue_v3_token(self, user_id, method_names, expires_at=None, project_id=None, domain_id=None, auth_context=None, trust=None, metadata_ref=None, include_catalog=True, parent_audit_id=None): """Issue a V3 Token. :param user_id: identity of the user :type user_id: string :param method_names: names of authentication methods :type method_names: list :param expires_at: optional time the token will expire :type expires_at: string :param project_id: optional project identity :type project_id: string :param domain_id: optional domain identity :type domain_id: string :param auth_context: optional context from the authorization plugins :type auth_context: dict :param trust: optional trust reference :type trust: dict :param metadata_ref: optional metadata reference :type metadata_ref: dict :param include_catalog: optional, include the catalog in token data :type include_catalog: boolean :param parent_audit_id: optional, the audit id of the parent token :type parent_audit_id: string :returns: (token_id, token_data) """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def validate_v2_token(self, token_ref): """Validate the given V2 token and return the token data. Must raise Unauthorized exception if unable to validate token. :param token_ref: the token reference :type token_ref: dict :returns: token data :raises keystone.exception.TokenNotFound: If the token doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def validate_non_persistent_token(self, token_id): """Validate a given non-persistent token id and return the token_data. 
:param token_id: the token id :type token_id: string :returns: token data :raises keystone.exception.TokenNotFound: When the token is invalid """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def validate_v3_token(self, token_ref): """Validate the given V3 token and return the token_data. :param token_ref: the token reference :type token_ref: dict :returns: token data :raises keystone.exception.TokenNotFound: If the token doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def _get_token_id(self, token_data): """Generate the token_id based upon the data in token_data. :param token_data: token information :type token_data: dict :returns: token identifier :rtype: six.text_type """ raise exception.NotImplemented() # pragma: no cover keystone-9.0.0/keystone/token/providers/0000775000567000056710000000000012701407246021523 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/token/providers/pki.py0000664000567000056710000000437712701407102022662 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Keystone PKI Token Provider""" from keystoneclient.common import cms from oslo_config import cfg from oslo_log import log from oslo_log import versionutils from oslo_serialization import jsonutils from keystone.common import environment from keystone.common import utils from keystone import exception from keystone.i18n import _, _LE from keystone.token.providers import common CONF = cfg.CONF LOG = log.getLogger(__name__) @versionutils.deprecated( as_of=versionutils.deprecated.MITAKA, what='the PKI token provider', in_favor_of='the Fernet or UUID token providers') class Provider(common.BaseProvider): def _get_token_id(self, token_data): try: # force conversion to a string as the keystone client cms code # produces unicode. This can be removed if the client returns # str() # TODO(ayoung): Make to a byte_str for Python3 token_json = jsonutils.dumps(token_data, cls=utils.PKIEncoder) token_id = str(cms.cms_sign_token(token_json, CONF.signing.certfile, CONF.signing.keyfile)) return token_id except environment.subprocess.CalledProcessError: LOG.exception(_LE('Unable to sign token')) raise exception.UnexpectedError(_( 'Unable to sign token.')) @property def _supports_bind_authentication(self): """Return if the token provider supports bind authentication methods. :returns: True """ return True def needs_persistence(self): """Should the token be written to a backend.""" return True keystone-9.0.0/keystone/token/providers/fernet/0000775000567000056710000000000012701407246023006 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/token/providers/fernet/utils.py0000664000567000056710000002445112701407102024515 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
def validate_key_repository(requires_write=False):
    """Validate permissions on the key repository directory."""
    # NOTE(lbragstad): We shouldn't need to check if the directory was passed
    # in as None because we don't set allow_no_values to True.
    key_repo = CONF.fernet_tokens.key_repository

    # ensure current user has sufficient access to the key repository
    required_modes = [os.R_OK, os.X_OK]
    if requires_write:
        required_modes.append(os.W_OK)
    is_valid = all(os.access(key_repo, mode) for mode in required_modes)

    if not is_valid:
        LOG.error(
            _LE('Either [fernet_tokens] key_repository does not exist or '
                'Keystone does not have sufficient permission to access it: '
                '%s'), key_repo)
    else:
        # ensure the key repository isn't world-readable
        stat_info = os.stat(key_repo)
        if (stat_info.st_mode & stat.S_IROTH or
                stat_info.st_mode & stat.S_IXOTH):
            LOG.warning(_LW(
                '[fernet_tokens] key_repository is world readable: %s'),
                key_repo)

    return is_valid
    # (continuation of _convert_to_integers)
    try:
        id_int = int(id_value)
    except ValueError as e:
        msg = _LE('Unable to convert Keystone user or group ID. Error: %s')
        LOG.error(msg, e)
        raise

    return id_int


def create_key_directory(keystone_user_id=None, keystone_group_id=None):
    """If the configured key directory does not exist, attempt to create it.

    :param keystone_user_id: numeric user ID to chown the directory to
    :param keystone_group_id: numeric group ID to chown the directory to
    """
    if not os.access(CONF.fernet_tokens.key_repository, os.F_OK):
        LOG.info(_LI(
            '[fernet_tokens] key_repository does not appear to exist; '
            'attempting to create it'))

        try:
            # 0o700: owner-only access; keys must not be world-readable.
            os.makedirs(CONF.fernet_tokens.key_repository, 0o700)
        except OSError:
            LOG.error(_LE(
                'Failed to create [fernet_tokens] key_repository: either it '
                'already exists or you don\'t have sufficient permissions to '
                'create it'))

        if keystone_user_id and keystone_group_id:
            os.chown(
                CONF.fernet_tokens.key_repository,
                keystone_user_id,
                keystone_group_id)
        elif keystone_user_id or keystone_group_id:
            # both IDs are required for a meaningful chown; warn otherwise
            LOG.warning(_LW(
                'Unable to change the ownership of [fernet_tokens] '
                'key_repository without a keystone user ID and keystone group '
                'ID both being provided: %s') %
                CONF.fernet_tokens.key_repository)


def _create_new_key(keystone_user_id, keystone_group_id):
    """Securely create a new encryption key.

    Create a new key that is readable by the Keystone group and Keystone
    user.
    """
    key = fernet.Fernet.generate_key()  # key is bytes

    # This ensures the key created is not world-readable
    old_umask = os.umask(0o177)
    if keystone_user_id and keystone_group_id:
        # temporarily drop to the keystone user/group so the key file is
        # owned correctly even when run as root (e.g. keystone-manage)
        old_egid = os.getegid()
        old_euid = os.geteuid()
        os.setegid(keystone_group_id)
        os.seteuid(keystone_user_id)
    elif keystone_user_id or keystone_group_id:
        LOG.warning(_LW(
            'Unable to change the ownership of the new key without a keystone '
            'user ID and keystone group ID both being provided: %s') %
            CONF.fernet_tokens.key_repository)
    # Determine the file name of the new key
    key_file = os.path.join(CONF.fernet_tokens.key_repository, '0')
    try:
        with open(key_file, 'w') as f:
            f.write(key.decode('utf-8'))  # convert key to str for the file.
    # (continuation of _create_new_key)
    finally:
        # After writing the key, set the umask back to it's original value. Do
        # the same with group and user identifiers if a Keystone group or user
        # was supplied.
        os.umask(old_umask)
        if keystone_user_id and keystone_group_id:
            os.seteuid(old_euid)
            os.setegid(old_egid)

    LOG.info(_LI('Created a new key: %s'), key_file)


def initialize_key_repository(keystone_user_id=None, keystone_group_id=None):
    """Create a key repository and bootstrap it with a key.

    :param keystone_user_id: User ID of the Keystone user.
    :param keystone_group_id: Group ID of the Keystone user.
    """
    # make sure we have work to do before proceeding
    if os.access(os.path.join(CONF.fernet_tokens.key_repository, '0'),
                 os.F_OK):
        LOG.info(_LI('Key repository is already initialized; aborting.'))
        return

    # bootstrap an existing key
    _create_new_key(keystone_user_id, keystone_group_id)

    # ensure that we end up with a primary and secondary key
    rotate_keys(keystone_user_id, keystone_group_id)


def rotate_keys(keystone_user_id=None, keystone_group_id=None):
    """Create a new primary key and revoke excess active keys.

    :param keystone_user_id: User ID of the Keystone user.
    :param keystone_group_id: Group ID of the Keystone user.

    Key rotation utilizes the following behaviors:

    - The highest key number is used as the primary key (used for encryption).
    - All keys can be used for decryption.
    - New keys are always created as key "0," which serves as a placeholder
      before promoting it to be the primary key.

    This strategy allows you to safely perform rotation on one node in a
    cluster, before syncing the results of the rotation to all other nodes
    (during both key rotation and synchronization, all nodes must recognize
    all primary keys).
""" # read the list of key files key_files = dict() for filename in os.listdir(CONF.fernet_tokens.key_repository): path = os.path.join(CONF.fernet_tokens.key_repository, str(filename)) if os.path.isfile(path): try: key_id = int(filename) except ValueError: # nosec : name isn't a number, ignore the file. pass else: key_files[key_id] = path LOG.info(_LI('Starting key rotation with %(count)s key files: %(list)s'), { 'count': len(key_files), 'list': list(key_files.values())}) # determine the number of the new primary key current_primary_key = max(key_files.keys()) LOG.info(_LI('Current primary key is: %s'), current_primary_key) new_primary_key = current_primary_key + 1 LOG.info(_LI('Next primary key will be: %s'), new_primary_key) # promote the next primary key to be the primary os.rename( os.path.join(CONF.fernet_tokens.key_repository, '0'), os.path.join(CONF.fernet_tokens.key_repository, str(new_primary_key))) key_files.pop(0) key_files[new_primary_key] = os.path.join( CONF.fernet_tokens.key_repository, str(new_primary_key)) LOG.info(_LI('Promoted key 0 to be the primary: %s'), new_primary_key) # add a new key to the rotation, which will be the *next* primary _create_new_key(keystone_user_id, keystone_group_id) max_active_keys = CONF.fernet_tokens.max_active_keys # check for bad configuration if max_active_keys < 1: LOG.warning(_LW( '[fernet_tokens] max_active_keys must be at least 1 to maintain a ' 'primary key.')) max_active_keys = 1 # purge excess keys # Note that key_files doesn't contain the new active key that was created, # only the old active keys. keys = sorted(key_files.keys(), reverse=True) while len(keys) > (max_active_keys - 1): index_to_purge = keys.pop() key_to_purge = key_files[index_to_purge] LOG.info(_LI('Excess key to purge: %s'), key_to_purge) os.remove(key_to_purge) def load_keys(): """Load keys from disk into a list. The first key in the list is the primary key used for encryption. 
    All other keys are active secondary keys that can be used for decrypting
    tokens.
    """
    if not validate_key_repository():
        return []

    # build a dictionary of key_number:encryption_key pairs
    keys = dict()
    for filename in os.listdir(CONF.fernet_tokens.key_repository):
        path = os.path.join(CONF.fernet_tokens.key_repository, str(filename))
        if os.path.isfile(path):
            with open(path, 'r') as key_file:
                try:
                    key_id = int(filename)
                except ValueError:  # nosec : filename isn't a number, ignore
                    # this file since it's not a key.
                    pass
                else:
                    keys[key_id] = key_file.read()

    if len(keys) != CONF.fernet_tokens.max_active_keys:
        # If there haven't been enough key rotations to reach max_active_keys,
        # or if the configured value of max_active_keys has changed since the
        # last rotation, then reporting the discrepancy might be useful. Once
        # the number of keys matches max_active_keys, this log entry is too
        # repetitive to be useful.
        LOG.info(_LI(
            'Loaded %(count)d encryption keys (max_active_keys=%(max)d) from: '
            '%(dir)s'), {
                'count': len(keys),
                'max': CONF.fernet_tokens.max_active_keys,
                'dir': CONF.fernet_tokens.key_repository})

    # return the encryption_keys, sorted by key number, descending
    return [keys[x] for x in sorted(keys.keys(), reverse=True)]

# --- tar member boundary (archive residue, not Python): fernet/__init__.py ---
keystone-9.0.0/keystone/token/providers/fernet/__init__.py0000664000567000056710000000113512701407102025106 0ustar  jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.token.providers.fernet.core import * # noqa keystone-9.0.0/keystone/token/providers/fernet/core.py0000664000567000056710000002115412701407102024302 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.common import dependency from keystone.common import utils as ks_utils from keystone.federation import constants as federation_constants from keystone.token import provider from keystone.token.providers import common from keystone.token.providers.fernet import token_formatters as tf CONF = cfg.CONF @dependency.requires('trust_api', 'oauth_api') class Provider(common.BaseProvider): def __init__(self, *args, **kwargs): super(Provider, self).__init__(*args, **kwargs) self.token_formatter = tf.TokenFormatter() def needs_persistence(self): """Should the token be written to a backend.""" return False def issue_v2_token(self, *args, **kwargs): token_id, token_data = super(Provider, self).issue_v2_token( *args, **kwargs) self._build_issued_at_info(token_id, token_data) return token_id, token_data def issue_v3_token(self, *args, **kwargs): token_id, token_data = super(Provider, self).issue_v3_token( *args, **kwargs) self._build_issued_at_info(token_id, token_data) return token_id, token_data def _build_issued_at_info(self, token_id, token_data): # NOTE(roxanaghe, lbragstad): We must use the creation time that # Fernet builds into it's token. 
        # (continuation of _build_issued_at_info)
        # The Fernet spec details that the
        # token creation time is built into the token, outside of the payload
        # provided by Keystone. This is the reason why we don't pass the
        # issued_at time in the payload. This also means that we shouldn't
        # return a token reference with a creation time that we created
        # when Fernet uses a different creation time. We should use the
        # creation time provided by Fernet because it's the creation time
        # that we have to rely on when we validate the token.
        fernet_creation_datetime_obj = self.token_formatter.creation_time(
            token_id)
        if token_data.get('access'):
            # v2.0 token responses nest under 'access'
            token_data['access']['token']['issued_at'] = ks_utils.isotime(
                at=fernet_creation_datetime_obj, subsecond=True)
        else:
            token_data['token']['issued_at'] = ks_utils.isotime(
                at=fernet_creation_datetime_obj, subsecond=True)

    def _build_federated_info(self, token_data):
        """Extract everything needed for federated tokens.

        This dictionary is passed to federated token formatters, which unpack
        the values and build federated Fernet tokens.

        :returns: dict with 'group_ids', 'idp_id' and 'protocol_id' keys, or
            None if the token data contains no federation information
        """
        token_data = token_data['token']
        try:
            user = token_data['user']
            federation = user[federation_constants.FEDERATION]
            idp_id = federation['identity_provider']['id']
            protocol_id = federation['protocol']['id']
        except KeyError:
            # The token data doesn't have federated info, so we aren't dealing
            # with a federated token and no federated info to build.
            return

        group_ids = federation.get('groups')

        return {'group_ids': group_ids,
                'idp_id': idp_id,
                'protocol_id': protocol_id}

    def _rebuild_federated_info(self, federated_dict, user_id):
        """Format federated information into the token reference.

        The federated_dict is passed back from the federated token formatters.
        The responsibility of this method is to format the information passed
        back from the token formatter into the token reference before
        constructing the token data from the V3TokenDataHelper.
""" g_ids = federated_dict['group_ids'] idp_id = federated_dict['idp_id'] protocol_id = federated_dict['protocol_id'] federated_info = { 'groups': g_ids, 'identity_provider': {'id': idp_id}, 'protocol': {'id': protocol_id} } token_dict = { 'user': { federation_constants.FEDERATION: federated_info, 'id': user_id, 'name': user_id, 'domain': {'id': CONF.federation.federated_domain_name, 'name': CONF.federation.federated_domain_name, }, } } return token_dict def _rebuild_federated_token_roles(self, token_dict, federated_dict, user_id, project_id, domain_id): """Populate roles based on (groups, project/domain) pair. We must populate roles from (groups, project/domain) as ephemeral users don't exist in the backend. Upon success, a ``roles`` key will be added to ``token_dict``. :param token_dict: dictionary with data used for building token :param federated_dict: federated information such as identity provider protocol and set of group IDs :param user_id: user ID :param project_id: project ID the token is being scoped to :param domain_id: domain ID the token is being scoped to """ group_ids = [x['id'] for x in federated_dict['group_ids']] self.v3_token_data_helper.populate_roles_for_groups( token_dict, group_ids, project_id, domain_id, user_id) def _extract_v2_token_data(self, token_data): user_id = token_data['access']['user']['id'] expires_at = token_data['access']['token']['expires'] audit_ids = token_data['access']['token'].get('audit_ids') methods = ['password'] if audit_ids: parent_audit_id = token_data['access']['token'].get( 'parent_audit_id') audit_ids = provider.audit_info(parent_audit_id) if parent_audit_id: methods.append('token') project_id = token_data['access']['token'].get('tenant', {}).get('id') domain_id = None trust_id = None access_token_id = None federated_info = None return (user_id, expires_at, audit_ids, methods, domain_id, project_id, trust_id, access_token_id, federated_info) def _extract_v3_token_data(self, token_data): """Extract information 
        from a v3 token reference."""
        user_id = token_data['token']['user']['id']
        expires_at = token_data['token']['expires_at']
        audit_ids = token_data['token']['audit_ids']
        methods = token_data['token'].get('methods')
        domain_id = token_data['token'].get('domain', {}).get('id')
        project_id = token_data['token'].get('project', {}).get('id')
        trust_id = token_data['token'].get('OS-TRUST:trust', {}).get('id')
        access_token_id = token_data['token'].get('OS-OAUTH1', {}).get(
            'access_token_id')
        federated_info = self._build_federated_info(token_data)

        return (user_id, expires_at, audit_ids, methods, domain_id,
                project_id, trust_id, access_token_id, federated_info)

    def _get_token_id(self, token_data):
        """Generate the token_id based upon the data in token_data.

        :param token_data: token information
        :type token_data: dict
        :rtype: six.text_type
        """
        # NOTE(lbragstad): Only v2.0 token responses include an 'access'
        # attribute.
        if token_data.get('access'):
            (user_id, expires_at, audit_ids, methods, domain_id, project_id,
                trust_id, access_token_id, federated_info) = (
                    self._extract_v2_token_data(token_data))
        else:
            (user_id, expires_at, audit_ids, methods, domain_id, project_id,
                trust_id, access_token_id, federated_info) = (
                    self._extract_v3_token_data(token_data))

        return self.token_formatter.create_token(
            user_id,
            expires_at,
            audit_ids,
            methods=methods,
            domain_id=domain_id,
            project_id=project_id,
            trust_id=trust_id,
            federated_info=federated_info,
            access_token_id=access_token_id
        )

    @property
    def _supports_bind_authentication(self):
        """Return if the token provider supports bind authentication methods.

        :returns: False
        """
        return False

# --- tar member boundary (archive residue, not Python): fernet/token_formatters.py ---
keystone-9.0.0/keystone/token/providers/fernet/token_formatters.py0000664000567000056710000006415612701407102026751 0ustar  jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import datetime
import struct
import uuid

from cryptography import fernet
import msgpack
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from six.moves import map
from six.moves import urllib

from keystone.auth import plugins as auth_plugins
from keystone.common import utils as ks_utils
from keystone import exception
from keystone.i18n import _, _LI
from keystone.token import provider
from keystone.token.providers.fernet import utils

CONF = cfg.CONF

LOG = log.getLogger(__name__)

# Fernet byte indexes as computed by pypi/keyless_fernet and defined in
# https://github.com/fernet/spec
TIMESTAMP_START = 1
TIMESTAMP_END = 9


class TokenFormatter(object):
    """Packs and unpacks payloads into tokens for transport."""

    @property
    def crypto(self):
        """Return a cryptography instance.

        You can extend this class with a custom crypto @property to provide
        your own token encoding / decoding. For example, using a different
        cryptography library (e.g. ``python-keyczar``) or to meet arbitrary
        security requirements.

        This @property just needs to return an object that implements
        ``encrypt(plaintext)`` and ``decrypt(ciphertext)``.
        """
        # keys are re-read from disk on every access so rotation takes
        # effect without a restart
        keys = utils.load_keys()

        if not keys:
            raise exception.KeysNotFound()

        fernet_instances = [fernet.Fernet(key) for key in keys]
        return fernet.MultiFernet(fernet_instances)

    def pack(self, payload):
        """Pack a payload for transport as a token.
        :type payload: six.binary_type
        :rtype: six.text_type
        """
        # base64 padding (if any) is not URL-safe
        return self.crypto.encrypt(payload).rstrip(b'=').decode('utf-8')

    def unpack(self, token):
        """Unpack a token, and validate the payload.

        :type token: six.text_type
        :rtype: six.binary_type
        """
        # TODO(lbragstad): Restore padding on token before decoding it.
        # Initially in Kilo, Fernet tokens were returned to the user with
        # padding appended to the token. Later in Liberty this padding was
        # removed and restored in the Fernet provider. The following if
        # statement ensures that we can validate tokens with and without token
        # padding, in the event of an upgrade and the tokens that are issued
        # throughout the upgrade. Remove this if statement when Mitaka opens
        # for development and exclusively use the restore_padding() class
        # method.
        if token.endswith('%3D'):
            # '%3D' is a URL-encoded '=': token arrived with quoted padding
            token = urllib.parse.unquote(token)
        else:
            token = TokenFormatter.restore_padding(token)

        try:
            return self.crypto.decrypt(token.encode('utf-8'))
        except fernet.InvalidToken:
            raise exception.ValidationError(
                _('This is not a recognized Fernet token %s') % token)

    @classmethod
    def restore_padding(cls, token):
        """Restore padding based on token size.

        :param token: token to restore padding on
        :type token: six.text_type
        :returns: token with correct padding
        """
        # Re-inflate the padding to a multiple of 4 characters
        mod_returned = len(token) % 4
        if mod_returned:
            missing_padding = 4 - mod_returned
            token += '=' * missing_padding
        return token

    @classmethod
    def creation_time(cls, fernet_token):
        """Returns the creation time of a valid Fernet token.
        :type fernet_token: six.text_type
        """
        fernet_token = TokenFormatter.restore_padding(fernet_token)
        # fernet_token is six.text_type

        # Fernet tokens are base64 encoded, so we need to unpack them first
        # urlsafe_b64decode() requires six.binary_type
        token_bytes = base64.urlsafe_b64decode(fernet_token.encode('utf-8'))

        # slice into the byte array to get just the timestamp
        timestamp_bytes = token_bytes[TIMESTAMP_START:TIMESTAMP_END]

        # convert those bytes to an integer
        # (it's a 64-bit "unsigned long long int" in C)
        timestamp_int = struct.unpack(">Q", timestamp_bytes)[0]

        # and with an integer, it's trivial to produce a datetime object
        created_at = datetime.datetime.utcfromtimestamp(timestamp_int)

        return created_at

    def create_token(self, user_id, expires_at, audit_ids, methods=None,
                     domain_id=None, project_id=None, trust_id=None,
                     federated_info=None, access_token_id=None):
        """Given a set of payload attributes, generate a Fernet token."""
        # pick the first payload variant whose scope arguments apply
        for payload_class in PAYLOAD_CLASSES:
            if payload_class.create_arguments_apply(
                    project_id=project_id, domain_id=domain_id,
                    trust_id=trust_id, federated_info=federated_info,
                    access_token_id=access_token_id):
                break

        version = payload_class.version
        payload = payload_class.assemble(
            user_id, methods, project_id, domain_id, expires_at, audit_ids,
            trust_id, federated_info, access_token_id
        )

        # the version number is prepended so validate_token() can dispatch
        versioned_payload = (version,) + payload
        serialized_payload = msgpack.packb(versioned_payload)
        token = self.pack(serialized_payload)

        # NOTE(lbragstad): We should warn against Fernet tokens that are over
        # 255 characters in length. This is mostly due to persisting the
        # tokens in a backend store of some kind that might have a limit of
        # 255 characters. Even though Keystone isn't storing a Fernet token
        # anywhere, we can't say it isn't being stored somewhere else with
        # those kind of backend constraints.
        # (continuation of create_token)
        if len(token) > 255:
            LOG.info(_LI('Fernet token created with length of %d '
                         'characters, which exceeds 255 characters'),
                     len(token))

        return token

    def validate_token(self, token):
        """Validates a Fernet token and returns the payload attributes.

        :type token: six.text_type
        """
        serialized_payload = self.unpack(token)
        versioned_payload = msgpack.unpackb(serialized_payload)
        # first element is the payload-class version, rest is the payload
        version, payload = versioned_payload[0], versioned_payload[1:]

        for payload_class in PAYLOAD_CLASSES:
            if version == payload_class.version:
                (user_id, methods, project_id, domain_id, expires_at,
                 audit_ids, trust_id, federated_info, access_token_id) = (
                    payload_class.disassemble(payload))
                break
        else:
            # If the token_format is not recognized, raise ValidationError.
            raise exception.ValidationError(_(
                'This is not a recognized Fernet payload version: %s') %
                version)

        # rather than appearing in the payload, the creation time is encoded
        # into the token format itself
        created_at = TokenFormatter.creation_time(token)
        created_at = ks_utils.isotime(at=created_at, subsecond=True)
        expires_at = timeutils.parse_isotime(expires_at)
        expires_at = ks_utils.isotime(at=expires_at, subsecond=True)

        return (user_id, methods, audit_ids, domain_id, project_id, trust_id,
                federated_info, access_token_id, created_at, expires_at)


class BasePayload(object):
    # each payload variant should have a unique version
    version = None

    @classmethod
    def create_arguments_apply(cls, **kwargs):
        """Check the arguments to see if they apply to this payload variant.

        :returns: True if the arguments indicate that this payload class is
            needed for the token otherwise returns False.
        :rtype: bool
        """
        raise NotImplementedError()

    @classmethod
    def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
                 audit_ids, trust_id, federated_info, access_token_id):
        """Assemble the payload of a token.
        :param user_id: identifier of the user in the token request
        :param methods: list of authentication methods used
        :param project_id: ID of the project to scope to
        :param domain_id: ID of the domain to scope to
        :param expires_at: datetime of the token's expiration
        :param audit_ids: list of the token's audit IDs
        :param trust_id: ID of the trust in effect
        :param federated_info: dictionary containing group IDs, the identity
            provider ID, protocol ID, and federated domain ID
        :param access_token_id: ID of the secret in OAuth1 authentication
        :returns: the payload of a token
        """
        raise NotImplementedError()

    @classmethod
    def disassemble(cls, payload):
        """Disassemble an unscoped payload into the component data.

        The tuple consists of::

            (user_id, methods, project_id, domain_id, expires_at_str,
             audit_ids, trust_id, federated_info, access_token_id)

        * ``methods`` are the auth methods.
        * federated_info is a dict contains the group IDs, the identity
          provider ID, the protocol ID, and the federated domain ID

        Fields will be set to None if they didn't apply to this payload type.

        :param payload: this variant of payload
        :returns: a tuple of the payloads component data
        """
        raise NotImplementedError()

    @classmethod
    def convert_uuid_hex_to_bytes(cls, uuid_string):
        """Compress UUID formatted strings to bytes.

        :param uuid_string: uuid string to compress to bytes
        :returns: a byte representation of the uuid
        """
        uuid_obj = uuid.UUID(uuid_string)
        return uuid_obj.bytes

    @classmethod
    def convert_uuid_bytes_to_hex(cls, uuid_byte_string):
        """Generate uuid.hex format based on byte string.

        :param uuid_byte_string: uuid string to generate from
        :returns: uuid hex formatted string
        """
        uuid_obj = uuid.UUID(bytes=uuid_byte_string)
        return uuid_obj.hex

    @classmethod
    def _convert_time_string_to_float(cls, time_string):
        """Convert a time formatted string to a float.
        :param time_string: time formatted string
        :returns: a timestamp as a float
        """
        time_object = timeutils.parse_isotime(time_string)
        # seconds since the Unix epoch, as a float
        return (timeutils.normalize_time(time_object) -
                datetime.datetime.utcfromtimestamp(0)).total_seconds()

    @classmethod
    def _convert_float_to_time_string(cls, time_float):
        """Convert a floating point timestamp to a string.

        :param time_float: integer representing timestamp
        :returns: a time formatted strings
        """
        time_object = datetime.datetime.utcfromtimestamp(time_float)
        return ks_utils.isotime(time_object, subsecond=True)

    @classmethod
    def attempt_convert_uuid_hex_to_bytes(cls, value):
        """Attempt to convert value to bytes or return value.

        :param value: value to attempt to convert to bytes
        :returns: tuple containing boolean indicating whether user_id was
            stored as bytes and uuid value as bytes or the original value
        """
        try:
            return (True, cls.convert_uuid_hex_to_bytes(value))
        except ValueError:
            # this might not be a UUID, depending on the situation (i.e.
            # federation)
            return (False, value)


class UnscopedPayload(BasePayload):
    version = 0

    @classmethod
    def create_arguments_apply(cls, **kwargs):
        # unscoped is the fallback variant: always applies
        return True

    @classmethod
    def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
                 audit_ids, trust_id, federated_info, access_token_id):
        b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        methods = auth_plugins.convert_method_list_to_integer(methods)
        expires_at_int = cls._convert_time_string_to_float(expires_at)
        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
                           audit_ids))
        return (b_user_id, methods, expires_at_int, b_audit_ids)

    @classmethod
    def disassemble(cls, payload):
        # payload[0] is the (is_uuid, value) pair produced by
        # attempt_convert_uuid_hex_to_bytes()
        (is_stored_as_bytes, user_id) = payload[0]
        if is_stored_as_bytes:
            user_id = cls.convert_uuid_bytes_to_hex(user_id)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        expires_at_str = cls._convert_float_to_time_string(payload[2])
        audit_ids = list(map(provider.base64_encode, payload[3]))
        project_id = None
        domain_id = None
        trust_id = None
        # (continuation of UnscopedPayload.disassemble)
        federated_info = None
        access_token_id = None
        return (user_id, methods, project_id, domain_id, expires_at_str,
                audit_ids, trust_id, federated_info, access_token_id)


class DomainScopedPayload(BasePayload):
    version = 1

    @classmethod
    def create_arguments_apply(cls, **kwargs):
        return kwargs['domain_id']

    @classmethod
    def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
                 audit_ids, trust_id, federated_info, access_token_id):
        b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        methods = auth_plugins.convert_method_list_to_integer(methods)
        try:
            b_domain_id = cls.convert_uuid_hex_to_bytes(domain_id)
        except ValueError:
            # the default domain ID is configurable, and probably isn't a UUID
            if domain_id == CONF.identity.default_domain_id:
                b_domain_id = domain_id
            else:
                raise
        expires_at_int = cls._convert_time_string_to_float(expires_at)
        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
                           audit_ids))
        return (b_user_id, methods, b_domain_id, expires_at_int, b_audit_ids)

    @classmethod
    def disassemble(cls, payload):
        (is_stored_as_bytes, user_id) = payload[0]
        if is_stored_as_bytes:
            user_id = cls.convert_uuid_bytes_to_hex(user_id)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        try:
            domain_id = cls.convert_uuid_bytes_to_hex(payload[2])
        except ValueError:
            # the default domain ID is configurable, and probably isn't a UUID
            if payload[2] == CONF.identity.default_domain_id:
                domain_id = payload[2]
            else:
                raise
        expires_at_str = cls._convert_float_to_time_string(payload[3])
        audit_ids = list(map(provider.base64_encode, payload[4]))
        project_id = None
        trust_id = None
        federated_info = None
        access_token_id = None
        return (user_id, methods, project_id, domain_id, expires_at_str,
                audit_ids, trust_id, federated_info, access_token_id)


class ProjectScopedPayload(BasePayload):
    version = 2

    @classmethod
    def create_arguments_apply(cls, **kwargs):
        return kwargs['project_id']

    @classmethod
    def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
                 audit_ids, trust_id, federated_info, access_token_id):
        b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        methods = auth_plugins.convert_method_list_to_integer(methods)
        b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
        expires_at_int = cls._convert_time_string_to_float(expires_at)
        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
                           audit_ids))
        return (b_user_id, methods, b_project_id, expires_at_int, b_audit_ids)

    @classmethod
    def disassemble(cls, payload):
        (is_stored_as_bytes, user_id) = payload[0]
        if is_stored_as_bytes:
            user_id = cls.convert_uuid_bytes_to_hex(user_id)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        (is_stored_as_bytes, project_id) = payload[2]
        if is_stored_as_bytes:
            project_id = cls.convert_uuid_bytes_to_hex(project_id)
        expires_at_str = cls._convert_float_to_time_string(payload[3])
        audit_ids = list(map(provider.base64_encode, payload[4]))
        domain_id = None
        trust_id = None
        federated_info = None
        access_token_id = None
        return (user_id, methods, project_id, domain_id, expires_at_str,
                audit_ids, trust_id, federated_info, access_token_id)


class TrustScopedPayload(BasePayload):
    version = 3

    @classmethod
    def create_arguments_apply(cls, **kwargs):
        return kwargs['trust_id']

    @classmethod
    def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
                 audit_ids, trust_id, federated_info, access_token_id):
        b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        methods = auth_plugins.convert_method_list_to_integer(methods)
        b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
        # trust IDs are always UUIDs, so no fallback form is needed
        b_trust_id = cls.convert_uuid_hex_to_bytes(trust_id)
        expires_at_int = cls._convert_time_string_to_float(expires_at)
        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
                           audit_ids))

        return (b_user_id, methods, b_project_id, expires_at_int, b_audit_ids,
                b_trust_id)

    @classmethod
    def disassemble(cls, payload):
        (is_stored_as_bytes, user_id) = payload[0]
        if is_stored_as_bytes:
            user_id =
                cls.convert_uuid_bytes_to_hex(user_id)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        (is_stored_as_bytes, project_id) = payload[2]
        if is_stored_as_bytes:
            project_id = cls.convert_uuid_bytes_to_hex(project_id)
        expires_at_str = cls._convert_float_to_time_string(payload[3])
        audit_ids = list(map(provider.base64_encode, payload[4]))
        trust_id = cls.convert_uuid_bytes_to_hex(payload[5])
        domain_id = None
        federated_info = None
        access_token_id = None
        return (user_id, methods, project_id, domain_id, expires_at_str,
                audit_ids, trust_id, federated_info, access_token_id)


class FederatedUnscopedPayload(BasePayload):
    version = 4

    @classmethod
    def create_arguments_apply(cls, **kwargs):
        return kwargs['federated_info']

    @classmethod
    def pack_group_id(cls, group_dict):
        # group IDs may not be UUIDs (federation), hence attempt_convert
        return cls.attempt_convert_uuid_hex_to_bytes(group_dict['id'])

    @classmethod
    def unpack_group_id(cls, group_id_in_bytes):
        (is_stored_as_bytes, group_id) = group_id_in_bytes
        if is_stored_as_bytes:
            group_id = cls.convert_uuid_bytes_to_hex(group_id)
        return {'id': group_id}

    @classmethod
    def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
                 audit_ids, trust_id, federated_info, access_token_id):
        b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        methods = auth_plugins.convert_method_list_to_integer(methods)
        b_group_ids = list(map(cls.pack_group_id,
                               federated_info['group_ids']))
        b_idp_id = cls.attempt_convert_uuid_hex_to_bytes(
            federated_info['idp_id'])
        protocol_id = federated_info['protocol_id']
        expires_at_int = cls._convert_time_string_to_float(expires_at)
        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
                               audit_ids))

        return (b_user_id, methods, b_group_ids, b_idp_id, protocol_id,
                expires_at_int, b_audit_ids)

    @classmethod
    def disassemble(cls, payload):
        (is_stored_as_bytes, user_id) = payload[0]
        if is_stored_as_bytes:
            user_id = cls.convert_uuid_bytes_to_hex(user_id)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        group_ids = list(map(cls.unpack_group_id,
payload[2])) (is_stored_as_bytes, idp_id) = payload[3] if is_stored_as_bytes: idp_id = cls.convert_uuid_bytes_to_hex(idp_id) protocol_id = payload[4] expires_at_str = cls._convert_float_to_time_string(payload[5]) audit_ids = list(map(provider.base64_encode, payload[6])) federated_info = dict(group_ids=group_ids, idp_id=idp_id, protocol_id=protocol_id) project_id = None domain_id = None trust_id = None access_token_id = None return (user_id, methods, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_info, access_token_id) class FederatedScopedPayload(FederatedUnscopedPayload): version = None @classmethod def assemble(cls, user_id, methods, project_id, domain_id, expires_at, audit_ids, trust_id, federated_info, access_token_id): b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) b_scope_id = cls.attempt_convert_uuid_hex_to_bytes( project_id or domain_id) b_group_ids = list(map(cls.pack_group_id, federated_info['group_ids'])) b_idp_id = cls.attempt_convert_uuid_hex_to_bytes( federated_info['idp_id']) protocol_id = federated_info['protocol_id'] expires_at_int = cls._convert_time_string_to_float(expires_at) b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes, audit_ids)) return (b_user_id, methods, b_scope_id, b_group_ids, b_idp_id, protocol_id, expires_at_int, b_audit_ids) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] if is_stored_as_bytes: user_id = cls.convert_uuid_bytes_to_hex(user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) (is_stored_as_bytes, scope_id) = payload[2] if is_stored_as_bytes: scope_id = cls.convert_uuid_bytes_to_hex(scope_id) project_id = ( scope_id if cls.version == FederatedProjectScopedPayload.version else None) domain_id = ( scope_id if cls.version == FederatedDomainScopedPayload.version else None) group_ids = list(map(cls.unpack_group_id, payload[3])) (is_stored_as_bytes, idp_id) = 
payload[4] if is_stored_as_bytes: idp_id = cls.convert_uuid_bytes_to_hex(idp_id) protocol_id = payload[5] expires_at_str = cls._convert_float_to_time_string(payload[6]) audit_ids = list(map(provider.base64_encode, payload[7])) federated_info = dict(idp_id=idp_id, protocol_id=protocol_id, group_ids=group_ids) trust_id = None access_token_id = None return (user_id, methods, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_info, access_token_id) class FederatedProjectScopedPayload(FederatedScopedPayload): version = 5 @classmethod def create_arguments_apply(cls, **kwargs): return kwargs['project_id'] and kwargs['federated_info'] class FederatedDomainScopedPayload(FederatedScopedPayload): version = 6 @classmethod def create_arguments_apply(cls, **kwargs): return kwargs['domain_id'] and kwargs['federated_info'] class OauthScopedPayload(BasePayload): version = 7 @classmethod def create_arguments_apply(cls, **kwargs): return kwargs['access_token_id'] @classmethod def assemble(cls, user_id, methods, project_id, domain_id, expires_at, audit_ids, trust_id, federated_info, access_token_id): b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id) expires_at_int = cls._convert_time_string_to_float(expires_at) b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes, audit_ids)) b_access_token_id = cls.attempt_convert_uuid_hex_to_bytes( access_token_id) return (b_user_id, methods, b_project_id, b_access_token_id, expires_at_int, b_audit_ids) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] if is_stored_as_bytes: user_id = cls.convert_uuid_bytes_to_hex(user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) (is_stored_as_bytes, project_id) = payload[2] if is_stored_as_bytes: project_id = cls.convert_uuid_bytes_to_hex(project_id) (is_stored_as_bytes, access_token_id) = 
payload[3] if is_stored_as_bytes: access_token_id = cls.convert_uuid_bytes_to_hex(access_token_id) expires_at_str = cls._convert_float_to_time_string(payload[4]) audit_ids = list(map(provider.base64_encode, payload[5])) domain_id = None trust_id = None federated_info = None return (user_id, methods, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_info, access_token_id) # For now, the order of the classes in the following list is important. This # is because the way they test that the payload applies to them in # the create_arguments_apply method requires that the previous ones rejected # the payload arguments. For example, UnscopedPayload must be last since it's # the catch-all after all the other payloads have been checked. # TODO(blk-u): Clean up the create_arguments_apply methods so that they don't # depend on the previous classes then these can be in any order. PAYLOAD_CLASSES = [ OauthScopedPayload, TrustScopedPayload, FederatedProjectScopedPayload, FederatedDomainScopedPayload, FederatedUnscopedPayload, ProjectScopedPayload, DomainScopedPayload, UnscopedPayload, ] keystone-9.0.0/keystone/token/providers/uuid.py0000664000567000056710000000232112701407102023030 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Keystone UUID Token Provider""" from __future__ import absolute_import import uuid from keystone.token.providers import common class Provider(common.BaseProvider): def __init__(self, *args, **kwargs): super(Provider, self).__init__(*args, **kwargs) def _get_token_id(self, token_data): return uuid.uuid4().hex @property def _supports_bind_authentication(self): """Return if the token provider supports bind authentication methods. :returns: True """ return True def needs_persistence(self): """Should the token be written to a backend.""" return True keystone-9.0.0/keystone/token/providers/__init__.py0000664000567000056710000000000012701407102023611 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/token/providers/pkiz.py0000664000567000056710000000431512701407102023044 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Keystone Compressed PKI Token Provider""" from keystoneclient.common import cms from oslo_config import cfg from oslo_log import log from oslo_log import versionutils from oslo_serialization import jsonutils from keystone.common import environment from keystone.common import utils from keystone import exception from keystone.i18n import _ from keystone.token.providers import common CONF = cfg.CONF LOG = log.getLogger(__name__) ERROR_MESSAGE = _('Unable to sign token.') @versionutils.deprecated( as_of=versionutils.deprecated.MITAKA, what='the PKIZ token provider', in_favor_of='the Fernet or UUID token providers') class Provider(common.BaseProvider): def _get_token_id(self, token_data): try: # force conversion to a string as the keystone client cms code # produces unicode. This can be removed if the client returns # str() # TODO(ayoung): Make to a byte_str for Python3 token_json = jsonutils.dumps(token_data, cls=utils.PKIEncoder) token_id = str(cms.pkiz_sign(token_json, CONF.signing.certfile, CONF.signing.keyfile)) return token_id except environment.subprocess.CalledProcessError: LOG.exception(ERROR_MESSAGE) raise exception.UnexpectedError(ERROR_MESSAGE) @property def _supports_bind_authentication(self): """Return if the token provider supports bind authentication methods. :returns: True """ return True def needs_persistence(self): """Should the token be written to a backend.""" return True keystone-9.0.0/keystone/token/providers/common.py0000664000567000056710000010462412701407102023363 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
import six
from six.moves.urllib import parse

from keystone.common import controller as common_controller
from keystone.common import dependency
from keystone.common import utils
from keystone import exception
from keystone.federation import constants as federation_constants
from keystone.i18n import _, _LE
from keystone import token
from keystone.token import provider


LOG = log.getLogger(__name__)
CONF = cfg.CONF


@dependency.requires('catalog_api', 'resource_api', 'assignment_api')
class V2TokenDataHelper(object):
    """Creates V2 token data."""

    def v3_to_v2_token(self, v3_token_data):
        """Convert v3 token data into v2.0 token data.

        This method expects a dictionary generated from
        V3TokenDataHelper.get_token_data() and converts it to look like a v2.0
        token dictionary.

        :param v3_token_data: dictionary formatted for v3 tokens
        :returns: dictionary formatted for v2 tokens
        :raises keystone.exception.Unauthorized: If a specific token type is
            not supported in v2.

        """
        token_data = {}
        # Build v2 token
        v3_token = v3_token_data['token']

        # NOTE(lbragstad): Version 2.0 tokens don't know about any domain other
        # than the default domain specified in the configuration.
        domain_id = v3_token.get('domain', {}).get('id')
        if domain_id and CONF.identity.default_domain_id != domain_id:
            msg = ('Unable to validate domain-scoped tokens outside of the '
                   'default domain')
            raise exception.Unauthorized(msg)

        token = {}
        token['expires'] = v3_token.get('expires_at')
        token['issued_at'] = v3_token.get('issued_at')
        token['audit_ids'] = v3_token.get('audit_ids')
        if 'project' in v3_token:
            # v3 token_data does not contain all tenant attributes
            tenant = self.resource_api.get_project(
                v3_token['project']['id'])
            # Drop domain specific fields since v2 calls are not domain-aware.
            token['tenant'] = common_controller.V2Controller.v3_to_v2_project(
                tenant)
        token_data['token'] = token

        # Build v2 user
        v3_user = v3_token['user']
        user = common_controller.V2Controller.v3_to_v2_user(v3_user)

        # Trust- and OAuth-scoped tokens have no v2 representation; reject
        # them rather than emit a lossy conversion.
        if 'OS-TRUST:trust' in v3_token:
            msg = ('Unable to validate trust-scoped tokens using version v2.0 '
                   'API.')
            raise exception.Unauthorized(msg)

        if 'OS-OAUTH1' in v3_token:
            msg = ('Unable to validate Oauth tokens using the version v2.0 '
                   'API.')
            raise exception.Unauthorized(msg)

        # Set user roles
        user['roles'] = []
        role_ids = []
        for role in v3_token.get('roles', []):
            # v2 keeps role names on the user and role ids in the metadata.
            role_ids.append(role.pop('id'))
            user['roles'].append(role)
        user['roles_links'] = []
        token_data['user'] = user

        # Get and build v2 service catalog
        token_data['serviceCatalog'] = []
        if 'tenant' in token:
            catalog_ref = self.catalog_api.get_catalog(
                user['id'], token['tenant']['id'])
            if catalog_ref:
                token_data['serviceCatalog'] = self.format_catalog(catalog_ref)

        # Build v2 metadata
        metadata = {}
        metadata['roles'] = role_ids
        # Setting is_admin to keep consistency in v2 response
        metadata['is_admin'] = 0
        token_data['metadata'] = metadata

        return {'access': token_data}

    @classmethod
    def format_token(cls, token_ref, roles_ref=None, catalog_ref=None,
                     trust_ref=None):
        # Build a v2 'access' document from a persisted token reference.
        audit_info = None
        user_ref = token_ref['user']
        metadata_ref = token_ref['metadata']
        if roles_ref is None:
            roles_ref = []
        expires = token_ref.get('expires',
provider.default_expire_time()) if expires is not None: if not isinstance(expires, six.text_type): expires = utils.isotime(expires) token_data = token_ref.get('token_data') if token_data: token_audit = token_data.get( 'access', token_data).get('token', {}).get('audit_ids') audit_info = token_audit if audit_info is None: audit_info = provider.audit_info(token_ref.get('parent_audit_id')) o = {'access': {'token': {'id': token_ref['id'], 'expires': expires, 'issued_at': utils.isotime(subsecond=True), 'audit_ids': audit_info }, 'user': {'id': user_ref['id'], 'name': user_ref['name'], 'username': user_ref['name'], 'roles': roles_ref, 'roles_links': metadata_ref.get('roles_links', []) } } } if 'bind' in token_ref: o['access']['token']['bind'] = token_ref['bind'] if 'tenant' in token_ref and token_ref['tenant']: token_ref['tenant']['enabled'] = True o['access']['token']['tenant'] = token_ref['tenant'] if catalog_ref is not None: o['access']['serviceCatalog'] = V2TokenDataHelper.format_catalog( catalog_ref) if metadata_ref: if 'is_admin' in metadata_ref: o['access']['metadata'] = {'is_admin': metadata_ref['is_admin']} else: o['access']['metadata'] = {'is_admin': 0} if 'roles' in metadata_ref: o['access']['metadata']['roles'] = metadata_ref['roles'] if CONF.trust.enabled and trust_ref: o['access']['trust'] = {'trustee_user_id': trust_ref['trustee_user_id'], 'id': trust_ref['id'], 'trustor_user_id': trust_ref['trustor_user_id'], 'impersonation': trust_ref['impersonation'] } return o @classmethod def format_catalog(cls, catalog_ref): """Munge catalogs from internal to output format. Internal catalogs look like:: {$REGION: { {$SERVICE: { $key1: $value1, ... } } } The legacy api wants them to look like:: [{'name': $SERVICE[name], 'type': $SERVICE, 'endpoints': [{ 'tenantId': $tenant_id, ... 
'region': $REGION, }], 'endpoints_links': [], }] """ if not catalog_ref: return [] services = {} for region, region_ref in catalog_ref.items(): for service, service_ref in region_ref.items(): new_service_ref = services.get(service, {}) new_service_ref['name'] = service_ref.pop('name') new_service_ref['type'] = service new_service_ref['endpoints_links'] = [] service_ref['region'] = region endpoints_ref = new_service_ref.get('endpoints', []) endpoints_ref.append(service_ref) new_service_ref['endpoints'] = endpoints_ref services[service] = new_service_ref return list(services.values()) @dependency.requires('assignment_api', 'catalog_api', 'federation_api', 'identity_api', 'resource_api', 'role_api', 'trust_api') class V3TokenDataHelper(object): """Token data helper.""" def __init__(self): # Keep __init__ around to ensure dependency injection works. super(V3TokenDataHelper, self).__init__() def _get_filtered_domain(self, domain_id): domain_ref = self.resource_api.get_domain(domain_id) return {'id': domain_ref['id'], 'name': domain_ref['name']} def _get_filtered_project(self, project_id): project_ref = self.resource_api.get_project(project_id) filtered_project = { 'id': project_ref['id'], 'name': project_ref['name']} if project_ref['domain_id'] is not None: filtered_project['domain'] = ( self._get_filtered_domain(project_ref['domain_id'])) else: # Projects acting as a domain do not have a domain_id attribute filtered_project['domain'] = None return filtered_project def _populate_scope(self, token_data, domain_id, project_id): if 'domain' in token_data or 'project' in token_data: # scope already exist, no need to populate it again return if domain_id: token_data['domain'] = self._get_filtered_domain(domain_id) if project_id: token_data['project'] = self._get_filtered_project(project_id) def _populate_is_admin_project(self, token_data): # TODO(ayoung): Support the ability for a project acting as a domain # to be the admin project once the rest of the code for projects # 
acting as domains is merged. Code will likely be: # (r.admin_project_name == None and project['is_domain'] == True # and project['name'] == r.admin_project_domain_name) project = token_data['project'] r = CONF.resource if (project['name'] == r.admin_project_name and project['domain']['name'] == r.admin_project_domain_name): token_data['is_admin_project'] = True def _get_roles_for_user(self, user_id, domain_id, project_id): roles = [] if domain_id: roles = self.assignment_api.get_roles_for_user_and_domain( user_id, domain_id) if project_id: roles = self.assignment_api.get_roles_for_user_and_project( user_id, project_id) return [self.role_api.get_role(role_id) for role_id in roles] def populate_roles_for_groups(self, token_data, group_ids, project_id=None, domain_id=None, user_id=None): """Populate roles basing on provided groups and project/domain Used for ephemeral users with dynamically assigned groups. This method does not return anything, yet it modifies token_data in place. :param token_data: a dictionary used for building token response :param group_ids: list of group IDs a user is a member of :param project_id: project ID to scope to :param domain_id: domain ID to scope to :param user_id: user ID :raises keystone.exception.Unauthorized: when no roles were found for a (group_ids, project_id) or (group_ids, domain_id) pairs. """ def check_roles(roles, user_id, project_id, domain_id): # User was granted roles so simply exit this function. if roles: return if project_id: msg = _('User %(user_id)s has no access ' 'to project %(project_id)s') % { 'user_id': user_id, 'project_id': project_id} elif domain_id: msg = _('User %(user_id)s has no access ' 'to domain %(domain_id)s') % { 'user_id': user_id, 'domain_id': domain_id} # Since no roles were found a user is not authorized to # perform any operations. Raise an exception with # appropriate error message. 
raise exception.Unauthorized(msg) roles = self.assignment_api.get_roles_for_groups(group_ids, project_id, domain_id) check_roles(roles, user_id, project_id, domain_id) token_data['roles'] = roles def _populate_user(self, token_data, user_id, trust): if 'user' in token_data: # no need to repopulate user if it already exists return user_ref = self.identity_api.get_user(user_id) if CONF.trust.enabled and trust and 'OS-TRUST:trust' not in token_data: trustor_user_ref = (self.identity_api.get_user( trust['trustor_user_id'])) try: self.identity_api.assert_user_enabled(trust['trustor_user_id']) except AssertionError: raise exception.Forbidden(_('Trustor is disabled.')) if trust['impersonation']: user_ref = trustor_user_ref token_data['OS-TRUST:trust'] = ( { 'id': trust['id'], 'trustor_user': {'id': trust['trustor_user_id']}, 'trustee_user': {'id': trust['trustee_user_id']}, 'impersonation': trust['impersonation'] }) filtered_user = { 'id': user_ref['id'], 'name': user_ref['name'], 'domain': self._get_filtered_domain(user_ref['domain_id'])} token_data['user'] = filtered_user def _populate_oauth_section(self, token_data, access_token): if access_token: access_token_id = access_token['id'] consumer_id = access_token['consumer_id'] token_data['OS-OAUTH1'] = ({'access_token_id': access_token_id, 'consumer_id': consumer_id}) def _populate_roles(self, token_data, user_id, domain_id, project_id, trust, access_token): if 'roles' in token_data: # no need to repopulate roles return if access_token: filtered_roles = [] authed_role_ids = jsonutils.loads(access_token['role_ids']) all_roles = self.role_api.list_roles() for role in all_roles: for authed_role in authed_role_ids: if authed_role == role['id']: filtered_roles.append({'id': role['id'], 'name': role['name']}) token_data['roles'] = filtered_roles return if CONF.trust.enabled and trust: # If redelegated_trust_id is set, then we must traverse the # trust_chain in order to determine who the original trustor is. 
We # need to do this because the user ID of the original trustor helps # us determine scope in the redelegated context. if trust.get('redelegated_trust_id'): trust_chain = self.trust_api.get_trust_pedigree(trust['id']) token_user_id = trust_chain[-1]['trustor_user_id'] else: token_user_id = trust['trustor_user_id'] token_project_id = trust['project_id'] # trusts do not support domains yet token_domain_id = None else: token_user_id = user_id token_project_id = project_id token_domain_id = domain_id if token_domain_id or token_project_id: filtered_roles = [] if CONF.trust.enabled and trust: # First expand out any roles that were in the trust to include # any implied roles, whether global or domain specific refs = [{'role_id': role['id']} for role in trust['roles']] effective_trust_roles = ( self.assignment_api.add_implied_roles(refs)) # Now get the current role assignments for the trustor, # including any domain specific roles. assignment_list = self.assignment_api.list_role_assignments( user_id=token_user_id, project_id=token_project_id, effective=True, strip_domain_roles=False) current_effective_trustor_roles = ( list(set([x['role_id'] for x in assignment_list]))) # Go through each of the effective trust roles, making sure the # trustor still has them, if any have been removed, then we # will treat the trust as invalid for trust_role in effective_trust_roles: match_roles = [x for x in current_effective_trustor_roles if x == trust_role['role_id']] if match_roles: role = self.role_api.get_role(match_roles[0]) if role['domain_id'] is None: filtered_roles.append(role) else: raise exception.Forbidden( _('Trustee has no delegated roles.')) else: for role in self._get_roles_for_user(token_user_id, token_domain_id, token_project_id): filtered_roles.append({'id': role['id'], 'name': role['name']}) # user has no project or domain roles, therefore access denied if not filtered_roles: if token_project_id: msg = _('User %(user_id)s has no access ' 'to project %(project_id)s') % 
{ 'user_id': user_id, 'project_id': token_project_id} else: msg = _('User %(user_id)s has no access ' 'to domain %(domain_id)s') % { 'user_id': user_id, 'domain_id': token_domain_id} LOG.debug(msg) raise exception.Unauthorized(msg) token_data['roles'] = filtered_roles def _populate_service_catalog(self, token_data, user_id, domain_id, project_id, trust): if 'catalog' in token_data: # no need to repopulate service catalog return if CONF.trust.enabled and trust: user_id = trust['trustor_user_id'] if project_id or domain_id: service_catalog = self.catalog_api.get_v3_catalog( user_id, project_id) token_data['catalog'] = service_catalog def _populate_service_providers(self, token_data): if 'service_providers' in token_data: return service_providers = self.federation_api.get_enabled_service_providers() if service_providers: token_data['service_providers'] = service_providers def _populate_token_dates(self, token_data, expires=None, trust=None, issued_at=None): if not expires: expires = provider.default_expire_time() if not isinstance(expires, six.string_types): expires = utils.isotime(expires, subsecond=True) token_data['expires_at'] = expires token_data['issued_at'] = (issued_at or utils.isotime(subsecond=True)) def _populate_audit_info(self, token_data, audit_info=None): if audit_info is None or isinstance(audit_info, six.string_types): token_data['audit_ids'] = provider.audit_info(audit_info) elif isinstance(audit_info, list): token_data['audit_ids'] = audit_info else: msg = (_('Invalid audit info data type: %(data)s (%(type)s)') % {'data': audit_info, 'type': type(audit_info)}) LOG.error(msg) raise exception.UnexpectedError(msg) def get_token_data(self, user_id, method_names, domain_id=None, project_id=None, expires=None, trust=None, token=None, include_catalog=True, bind=None, access_token=None, issued_at=None, audit_info=None): token_data = {'methods': method_names} # We've probably already written these to the token if token: for x in ('roles', 'user', 'catalog', 
'project', 'domain'): if x in token: token_data[x] = token[x] if bind: token_data['bind'] = bind self._populate_scope(token_data, domain_id, project_id) if token_data.get('project'): self._populate_is_admin_project(token_data) self._populate_user(token_data, user_id, trust) self._populate_roles(token_data, user_id, domain_id, project_id, trust, access_token) self._populate_audit_info(token_data, audit_info) if include_catalog: self._populate_service_catalog(token_data, user_id, domain_id, project_id, trust) self._populate_service_providers(token_data) self._populate_token_dates(token_data, expires=expires, trust=trust, issued_at=issued_at) self._populate_oauth_section(token_data, access_token) return {'token': token_data} @dependency.requires('catalog_api', 'identity_api', 'oauth_api', 'resource_api', 'role_api', 'trust_api') class BaseProvider(provider.Provider): def __init__(self, *args, **kwargs): super(BaseProvider, self).__init__(*args, **kwargs) self.v3_token_data_helper = V3TokenDataHelper() self.v2_token_data_helper = V2TokenDataHelper() def get_token_version(self, token_data): if token_data and isinstance(token_data, dict): if 'token_version' in token_data: if token_data['token_version'] in token.provider.VERSIONS: return token_data['token_version'] # FIXME(morganfainberg): deprecate the following logic in future # revisions. It is better to just specify the token_version in # the token_data itself. This way we can support future versions # that might have the same fields. 
if 'access' in token_data: return token.provider.V2 if 'token' in token_data and 'methods' in token_data['token']: return token.provider.V3 raise exception.UnsupportedTokenVersionException() def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None): if token_ref.get('bind') and not self._supports_bind_authentication: msg = _('The configured token provider does not support bind ' 'authentication.') raise exception.NotImplemented(message=msg) metadata_ref = token_ref['metadata'] trust_ref = None if CONF.trust.enabled and metadata_ref and 'trust_id' in metadata_ref: trust_ref = self.trust_api.get_trust(metadata_ref['trust_id']) token_data = self.v2_token_data_helper.format_token( token_ref, roles_ref, catalog_ref, trust_ref) token_id = self._get_token_id(token_data) token_data['access']['token']['id'] = token_id return token_id, token_data def _is_mapped_token(self, auth_context): return (federation_constants.IDENTITY_PROVIDER in auth_context and federation_constants.PROTOCOL in auth_context) def issue_v3_token(self, user_id, method_names, expires_at=None, project_id=None, domain_id=None, auth_context=None, trust=None, metadata_ref=None, include_catalog=True, parent_audit_id=None): if auth_context and auth_context.get('bind'): # NOTE(lbragstad): Check if the token provider being used actually # supports bind authentication methods before proceeding. 
if not self._supports_bind_authentication: raise exception.NotImplemented(_( 'The configured token provider does not support bind ' 'authentication.')) # for V2, trust is stashed in metadata_ref if (CONF.trust.enabled and not trust and metadata_ref and 'trust_id' in metadata_ref): trust = self.trust_api.get_trust(metadata_ref['trust_id']) if CONF.trust.enabled and trust: if user_id != trust['trustee_user_id']: raise exception.Forbidden(_('User is not a trustee.')) token_ref = None if auth_context and self._is_mapped_token(auth_context): token_ref = self._handle_mapped_tokens( auth_context, project_id, domain_id) access_token = None if 'oauth1' in method_names: access_token_id = auth_context['access_token_id'] access_token = self.oauth_api.get_access_token(access_token_id) token_data = self.v3_token_data_helper.get_token_data( user_id, method_names, domain_id=domain_id, project_id=project_id, expires=expires_at, trust=trust, bind=auth_context.get('bind') if auth_context else None, token=token_ref, include_catalog=include_catalog, access_token=access_token, audit_info=parent_audit_id) token_id = self._get_token_id(token_data) return token_id, token_data def _handle_mapped_tokens(self, auth_context, project_id, domain_id): user_id = auth_context['user_id'] group_ids = auth_context['group_ids'] idp = auth_context[federation_constants.IDENTITY_PROVIDER] protocol = auth_context[federation_constants.PROTOCOL] token_data = { 'user': { 'id': user_id, 'name': parse.unquote(user_id), federation_constants.FEDERATION: { 'groups': [{'id': x} for x in group_ids], 'identity_provider': {'id': idp}, 'protocol': {'id': protocol} }, 'domain': { 'id': CONF.federation.federated_domain_name, 'name': CONF.federation.federated_domain_name } } } if project_id or domain_id: self.v3_token_data_helper.populate_roles_for_groups( token_data, group_ids, project_id, domain_id, user_id) return token_data def _verify_token_ref(self, token_ref): """Verify and return the given token_ref.""" if not 
token_ref: raise exception.Unauthorized() return token_ref def _assert_is_not_federation_token(self, token_ref): """Make sure we aren't using v2 auth on a federation token.""" token_data = token_ref.get('token_data') if (token_data and self.get_token_version(token_data) == token.provider.V3): if 'OS-FEDERATION' in token_data['token']['user']: msg = _('Attempting to use OS-FEDERATION token with V2 ' 'Identity Service, use V3 Authentication') raise exception.Unauthorized(msg) def _assert_default_domain(self, token_ref): """Make sure we are operating on default domain only.""" if (token_ref.get('token_data') and self.get_token_version(token_ref.get('token_data')) == token.provider.V3): # this is a V3 token msg = _('Non-default domain is not supported') # domain scoping is prohibited if token_ref['token_data']['token'].get('domain'): raise exception.Unauthorized( _('Domain scoped token is not supported')) # if token is scoped to trust, both trustor and trustee must # be in the default domain. Furthermore, the delegated project # must also be in the default domain metadata_ref = token_ref['metadata'] if CONF.trust.enabled and 'trust_id' in metadata_ref: trust_ref = self.trust_api.get_trust(metadata_ref['trust_id']) trustee_user_ref = self.identity_api.get_user( trust_ref['trustee_user_id']) if (trustee_user_ref['domain_id'] != CONF.identity.default_domain_id): raise exception.Unauthorized(msg) trustor_user_ref = self.identity_api.get_user( trust_ref['trustor_user_id']) if (trustor_user_ref['domain_id'] != CONF.identity.default_domain_id): raise exception.Unauthorized(msg) project_ref = self.resource_api.get_project( trust_ref['project_id']) if (project_ref['domain_id'] != CONF.identity.default_domain_id): raise exception.Unauthorized(msg) def validate_v2_token(self, token_ref): try: self._assert_is_not_federation_token(token_ref) self._assert_default_domain(token_ref) # FIXME(gyee): performance or correctness? Should we return the # cached token or reconstruct it? 
Obviously if we are going with # the cached token, any role, project, or domain name changes # will not be reflected. One may argue that with PKI tokens, # we are essentially doing cached token validation anyway. # Lets go with the cached token strategy. Since token # management layer is now pluggable, one can always provide # their own implementation to suit their needs. token_data = token_ref.get('token_data') if (self.get_token_version(token_data) != token.provider.V2): # Validate the V3 token as V2 token_data = self.v2_token_data_helper.v3_to_v2_token( token_data) trust_id = token_data['access'].get('trust', {}).get('id') if trust_id: msg = ('Unable to validate trust-scoped tokens using version ' 'v2.0 API.') raise exception.Unauthorized(msg) return token_data except exception.ValidationError: LOG.exception(_LE('Failed to validate token')) token_id = token_ref['token_data']['access']['token']['id'] raise exception.TokenNotFound(token_id=token_id) def validate_non_persistent_token(self, token_id): try: (user_id, methods, audit_ids, domain_id, project_id, trust_id, federated_info, access_token_id, created_at, expires_at) = ( self.token_formatter.validate_token(token_id)) except exception.ValidationError as e: raise exception.TokenNotFound(e) token_dict = None trust_ref = None if federated_info: # NOTE(lbragstad): We need to rebuild information about the # federated token as well as the federated token roles. This is # because when we validate a non-persistent token, we don't have a # token reference to pull the federated token information out of. # As a result, we have to extract it from the token itself and # rebuild the federated context. These private methods currently # live in the keystone.token.providers.fernet.Provider() class. 
token_dict = self._rebuild_federated_info(federated_info, user_id) if project_id or domain_id: self._rebuild_federated_token_roles(token_dict, federated_info, user_id, project_id, domain_id) if trust_id: trust_ref = self.trust_api.get_trust(trust_id) access_token = None if access_token_id: access_token = self.oauth_api.get_access_token(access_token_id) return self.v3_token_data_helper.get_token_data( user_id, method_names=methods, domain_id=domain_id, project_id=project_id, issued_at=created_at, expires=expires_at, trust=trust_ref, token=token_dict, access_token=access_token, audit_info=audit_ids) def validate_v3_token(self, token_ref): # FIXME(gyee): performance or correctness? Should we return the # cached token or reconstruct it? Obviously if we are going with # the cached token, any role, project, or domain name changes # will not be reflected. One may argue that with PKI tokens, # we are essentially doing cached token validation anyway. # Lets go with the cached token strategy. Since token # management layer is now pluggable, one can always provide # their own implementation to suit their needs. trust_id = token_ref.get('trust_id') if trust_id: # token trust validation self.trust_api.get_trust(trust_id) token_data = token_ref.get('token_data') if not token_data or 'token' not in token_data: # token ref is created by V2 API project_id = None project_ref = token_ref.get('tenant') if project_ref: project_id = project_ref['id'] issued_at = token_ref['token_data']['access']['token']['issued_at'] audit = token_ref['token_data']['access']['token'].get('audit_ids') token_data = self.v3_token_data_helper.get_token_data( token_ref['user']['id'], ['password', 'token'], project_id=project_id, bind=token_ref.get('bind'), expires=token_ref['expires'], issued_at=issued_at, audit_info=audit) return token_data keystone-9.0.0/keystone/notifications.py0000664000567000056710000007052012701407102021604 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Notifications module for OpenStack Identity Service resources""" import collections import functools import inspect import logging import socket from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import reflection import pycadf from pycadf import cadftaxonomy as taxonomy from pycadf import cadftype from pycadf import credential from pycadf import eventfactory from pycadf import resource from keystone.i18n import _, _LE from keystone.common import utils notifier_opts = [ cfg.StrOpt('default_publisher_id', help='Default publisher_id for outgoing notifications'), cfg.StrOpt('notification_format', default='basic', choices=['basic', 'cadf'], help='Define the notification format for Identity Service ' 'events. A "basic" notification has information about ' 'the resource being operated on. A "cadf" notification ' 'has the same information, as well as information about ' 'the initiator of the event.'), cfg.MultiStrOpt('notification_opt_out', default=[], help='Define the notification options to opt-out from. ' 'The value expected is: ' 'identity... This field ' 'can be set multiple times in order to add more ' 'notifications to opt-out from. 
For example:\n ' 'notification_opt_out=identity.user.created\n ' 'notification_opt_out=identity.authenticate.success'), ] config_section = None list_opts = lambda: [(config_section, notifier_opts), ] LOG = log.getLogger(__name__) # NOTE(gyee): actions that can be notified. One must update this list whenever # a new action is supported. _ACTIONS = collections.namedtuple( 'NotificationActions', 'created, deleted, disabled, updated, internal') ACTIONS = _ACTIONS(created='created', deleted='deleted', disabled='disabled', updated='updated', internal='internal') """The actions on resources.""" CADF_TYPE_MAP = { 'group': taxonomy.SECURITY_GROUP, 'project': taxonomy.SECURITY_PROJECT, 'role': taxonomy.SECURITY_ROLE, 'user': taxonomy.SECURITY_ACCOUNT_USER, 'domain': taxonomy.SECURITY_DOMAIN, 'region': taxonomy.SECURITY_REGION, 'endpoint': taxonomy.SECURITY_ENDPOINT, 'service': taxonomy.SECURITY_SERVICE, 'policy': taxonomy.SECURITY_POLICY, 'OS-TRUST:trust': taxonomy.SECURITY_TRUST, 'OS-OAUTH1:access_token': taxonomy.SECURITY_CREDENTIAL, 'OS-OAUTH1:request_token': taxonomy.SECURITY_CREDENTIAL, 'OS-OAUTH1:consumer': taxonomy.SECURITY_ACCOUNT, } SAML_AUDIT_TYPE = 'http://docs.oasis-open.org/security/saml/v2.0' # resource types that can be notified _SUBSCRIBERS = {} _notifier = None SERVICE = 'identity' CONF = cfg.CONF CONF.register_opts(notifier_opts) # NOTE(morganfainberg): Special case notifications that are only used # internally for handling token persistence token deletions INVALIDATE_USER_TOKEN_PERSISTENCE = 'invalidate_user_tokens' INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE = 'invalidate_user_project_tokens' INVALIDATE_USER_OAUTH_CONSUMER_TOKENS = 'invalidate_user_consumer_tokens' class Audit(object): """Namespace for audit notification functions. This is a namespace object to contain all of the direct notification functions utilized for ``Manager`` methods. 
""" @classmethod def _emit(cls, operation, resource_type, resource_id, initiator, public, actor_dict=None): """Directly send an event notification. :param operation: one of the values from ACTIONS :param resource_type: type of resource being affected :param resource_id: ID of the resource affected :param initiator: CADF representation of the user that created the request :param public: If True (default), the event will be sent to the notifier API. If False, the event will only be sent via notify_event_callbacks to in process listeners :param actor_dict: dictionary of actor information in the event of assignment notification """ # NOTE(stevemar): the _send_notification function is # overloaded, it's used to register callbacks and to actually # send the notification externally. Thus, we should check # the desired notification format in the function instead # of before it. _send_notification( operation, resource_type, resource_id, actor_dict, public=public) if CONF.notification_format == 'cadf' and public: outcome = taxonomy.OUTCOME_SUCCESS _create_cadf_payload(operation, resource_type, resource_id, outcome, initiator) @classmethod def created(cls, resource_type, resource_id, initiator=None, public=True): cls._emit(ACTIONS.created, resource_type, resource_id, initiator, public) @classmethod def updated(cls, resource_type, resource_id, initiator=None, public=True): cls._emit(ACTIONS.updated, resource_type, resource_id, initiator, public) @classmethod def disabled(cls, resource_type, resource_id, initiator=None, public=True): cls._emit(ACTIONS.disabled, resource_type, resource_id, initiator, public) @classmethod def deleted(cls, resource_type, resource_id, initiator=None, public=True): cls._emit(ACTIONS.deleted, resource_type, resource_id, initiator, public) @classmethod def added_to(cls, target_type, target_id, actor_type, actor_id, initiator=None, public=True): actor_dict = {'id': actor_id, 'type': actor_type, 'actor_operation': 'added'} cls._emit(ACTIONS.updated, 
target_type, target_id, initiator, public, actor_dict=actor_dict) @classmethod def removed_from(cls, target_type, target_id, actor_type, actor_id, initiator=None, public=True): actor_dict = {'id': actor_id, 'type': actor_type, 'actor_operation': 'removed'} cls._emit(ACTIONS.updated, target_type, target_id, initiator, public, actor_dict=actor_dict) @classmethod def internal(cls, resource_type, resource_id): # NOTE(lbragstad): Internal notifications are never public and have # never used the initiator variable, but the _emit() method expects # them. Let's set them here but not expose them through the method # signature - that way someone can not do something like send an # internal notification publicly. initiator = None public = False cls._emit(ACTIONS.internal, resource_type, resource_id, initiator, public) def _get_callback_info(callback): """Return list containing callback's module and name. If the callback is a bound instance method also return the class name. :param callback: Function to call :type callback: function :returns: List containing parent module, (optional class,) function name :rtype: list """ module_name = getattr(callback, '__module__', None) func_name = callback.__name__ if inspect.ismethod(callback): class_name = reflection.get_class_name(callback.__self__, fully_qualified=False) return [module_name, class_name, func_name] else: return [module_name, func_name] def register_event_callback(event, resource_type, callbacks): """Register each callback with the event. 
:param event: Action being registered :type event: keystone.notifications.ACTIONS :param resource_type: Type of resource being operated on :type resource_type: str :param callbacks: Callback items to be registered with event :type callbacks: list :raises ValueError: If event is not a valid ACTION :raises TypeError: If callback is not callable """ if event not in ACTIONS: raise ValueError(_('%(event)s is not a valid notification event, must ' 'be one of: %(actions)s') % {'event': event, 'actions': ', '.join(ACTIONS)}) if not hasattr(callbacks, '__iter__'): callbacks = [callbacks] for callback in callbacks: if not callable(callback): msg = _('Method not callable: %s') % callback LOG.error(msg) raise TypeError(msg) _SUBSCRIBERS.setdefault(event, {}).setdefault(resource_type, set()) _SUBSCRIBERS[event][resource_type].add(callback) if LOG.logger.getEffectiveLevel() <= logging.DEBUG: # Do this only if its going to appear in the logs. msg = 'Callback: `%(callback)s` subscribed to event `%(event)s`.' callback_info = _get_callback_info(callback) callback_str = '.'.join(i for i in callback_info if i is not None) event_str = '.'.join(['identity', resource_type, event]) LOG.debug(msg, {'callback': callback_str, 'event': event_str}) def listener(cls): """A class decorator to declare a class to be a notification listener. A notification listener must specify the event(s) it is interested in by defining a ``event_callbacks`` attribute or property. ``event_callbacks`` is a dictionary where the key is the type of event and the value is a dictionary containing a mapping of resource types to callback(s). :data:`.ACTIONS` contains constants for the currently supported events. There is currently no single place to find constants for the resource types. 
Example:: @listener class Something(object): def __init__(self): self.event_callbacks = { notifications.ACTIONS.created: { 'user': self._user_created_callback, }, notifications.ACTIONS.deleted: { 'project': [ self._project_deleted_callback, self._do_cleanup, ] }, } """ def init_wrapper(init): @functools.wraps(init) def __new_init__(self, *args, **kwargs): init(self, *args, **kwargs) _register_event_callbacks(self) return __new_init__ def _register_event_callbacks(self): for event, resource_types in self.event_callbacks.items(): for resource_type, callbacks in resource_types.items(): register_event_callback(event, resource_type, callbacks) cls.__init__ = init_wrapper(cls.__init__) return cls def notify_event_callbacks(service, resource_type, operation, payload): """Sends a notification to registered extensions.""" if operation in _SUBSCRIBERS: if resource_type in _SUBSCRIBERS[operation]: for cb in _SUBSCRIBERS[operation][resource_type]: subst_dict = {'cb_name': cb.__name__, 'service': service, 'resource_type': resource_type, 'operation': operation, 'payload': payload} LOG.debug('Invoking callback %(cb_name)s for event ' '%(service)s %(resource_type)s %(operation)s for ' '%(payload)s', subst_dict) cb(service, resource_type, operation, payload) def _get_notifier(): """Return a notifier object. If _notifier is None it means that a notifier object has not been set. If _notifier is False it means that a notifier has previously failed to construct. Otherwise it is a constructed Notifier object. """ global _notifier if _notifier is None: host = CONF.default_publisher_id or socket.gethostname() try: transport = oslo_messaging.get_transport(CONF) _notifier = oslo_messaging.Notifier(transport, "identity.%s" % host) except Exception: LOG.exception(_LE("Failed to construct notifier")) _notifier = False return _notifier def clear_subscribers(): """Empty subscribers dictionary. This effectively stops notifications since there will be no subscribers to publish to. 
""" _SUBSCRIBERS.clear() def reset_notifier(): """Reset the notifications internal state. This is used only for testing purposes. """ global _notifier _notifier = None def _create_cadf_payload(operation, resource_type, resource_id, outcome, initiator): """Prepare data for CADF audit notifier. Transform the arguments into content to be consumed by the function that emits CADF events (_send_audit_notification). Specifically the ``resource_type`` (role, user, etc) must be transformed into a CADF keyword, such as: ``data/security/role``. The ``resource_id`` is added as a top level value for the ``resource_info`` key. Lastly, the ``operation`` is used to create the CADF ``action``, and the ``event_type`` name. As per the CADF specification, the ``action`` must start with create, update, delete, etc... i.e.: created.user or deleted.role However the ``event_type`` is an OpenStack-ism that is typically of the form project.resource.operation. i.e.: identity.project.updated :param operation: operation being performed (created, updated, or deleted) :param resource_type: type of resource being operated on (role, user, etc) :param resource_id: ID of resource being operated on :param outcome: outcomes of the operation (SUCCESS, FAILURE, etc) :param initiator: CADF representation of the user that created the request """ if resource_type not in CADF_TYPE_MAP: target_uri = taxonomy.UNKNOWN else: target_uri = CADF_TYPE_MAP.get(resource_type) target = resource.Resource(typeURI=target_uri, id=resource_id) audit_kwargs = {'resource_info': resource_id} cadf_action = '%s.%s' % (operation, resource_type) event_type = '%s.%s.%s' % (SERVICE, resource_type, operation) _send_audit_notification(cadf_action, initiator, outcome, target, event_type, **audit_kwargs) def _send_notification(operation, resource_type, resource_id, actor_dict=None, public=True): """Send notification to inform observers about the affected resource. 
This method doesn't raise an exception when sending the notification fails. :param operation: operation being performed (created, updated, or deleted) :param resource_type: type of resource being operated on :param resource_id: ID of resource being operated on :param actor_dict: a dictionary containing the actor's ID and type :param public: if True (default), the event will be sent to the notifier API. if False, the event will only be sent via notify_event_callbacks to in process listeners. """ payload = {'resource_info': resource_id} if actor_dict: payload['actor_id'] = actor_dict['id'] payload['actor_type'] = actor_dict['type'] payload['actor_operation'] = actor_dict['actor_operation'] notify_event_callbacks(SERVICE, resource_type, operation, payload) # Only send this notification if the 'basic' format is used, otherwise # let the CADF functions handle sending the notification. But we check # here so as to not disrupt the notify_event_callbacks function. if public and CONF.notification_format == 'basic': notifier = _get_notifier() if notifier: context = {} event_type = '%(service)s.%(resource_type)s.%(operation)s' % { 'service': SERVICE, 'resource_type': resource_type, 'operation': operation} if _check_notification_opt_out(event_type, outcome=None): return try: notifier.info(context, event_type, payload) except Exception: LOG.exception(_LE( 'Failed to send %(res_id)s %(event_type)s notification'), {'res_id': resource_id, 'event_type': event_type}) def _get_request_audit_info(context, user_id=None): """Collect audit information about the request used for CADF. 
:param context: Request context :param user_id: Optional user ID, alternatively collected from context :returns: Auditing data about the request :rtype: :class:`pycadf.Resource` """ remote_addr = None http_user_agent = None project_id = None domain_id = None if context and 'environment' in context and context['environment']: environment = context['environment'] remote_addr = environment.get('REMOTE_ADDR') http_user_agent = environment.get('HTTP_USER_AGENT') if not user_id: user_id = environment.get('KEYSTONE_AUTH_CONTEXT', {}).get('user_id') project_id = environment.get('KEYSTONE_AUTH_CONTEXT', {}).get('project_id') domain_id = environment.get('KEYSTONE_AUTH_CONTEXT', {}).get('domain_id') host = pycadf.host.Host(address=remote_addr, agent=http_user_agent) initiator = resource.Resource(typeURI=taxonomy.ACCOUNT_USER, host=host) if user_id: initiator.user_id = user_id initiator.id = utils.resource_uuid(user_id) if project_id: initiator.project_id = project_id if domain_id: initiator.domain_id = domain_id return initiator class CadfNotificationWrapper(object): """Send CADF event notifications for various methods. This function is only used for Authentication events. Its ``action`` and ``event_type`` are dictated below. - action: ``authenticate`` - event_type: ``identity.authenticate`` Sends CADF notifications for events such as whether an authentication was successful or not. 
:param operation: The authentication related action being performed """ def __init__(self, operation): self.action = operation self.event_type = '%s.%s' % (SERVICE, operation) def __call__(self, f): @functools.wraps(f) def wrapper(wrapped_self, context, user_id, *args, **kwargs): """Always send a notification.""" initiator = _get_request_audit_info(context, user_id) target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER) try: result = f(wrapped_self, context, user_id, *args, **kwargs) except Exception: # For authentication failure send a cadf event as well _send_audit_notification(self.action, initiator, taxonomy.OUTCOME_FAILURE, target, self.event_type) raise else: _send_audit_notification(self.action, initiator, taxonomy.OUTCOME_SUCCESS, target, self.event_type) return result return wrapper class CadfRoleAssignmentNotificationWrapper(object): """Send CADF notifications for ``role_assignment`` methods. This function is only used for role assignment events. Its ``action`` and ``event_type`` are dictated below. - action: ``created.role_assignment`` or ``deleted.role_assignment`` - event_type: ``identity.role_assignment.created`` or ``identity.role_assignment.deleted`` Sends a CADF notification if the wrapped method does not raise an :class:`Exception` (such as :class:`keystone.exception.NotFound`). :param operation: one of the values from ACTIONS (created or deleted) """ ROLE_ASSIGNMENT = 'role_assignment' def __init__(self, operation): self.action = '%s.%s' % (operation, self.ROLE_ASSIGNMENT) self.event_type = '%s.%s.%s' % (SERVICE, self.ROLE_ASSIGNMENT, operation) def __call__(self, f): @functools.wraps(f) def wrapper(wrapped_self, role_id, *args, **kwargs): """Send a notification if the wrapped callable is successful. NOTE(stevemar): The reason we go through checking kwargs and args for possible target and actor values is because the create_grant() (and delete_grant()) method are called differently in various tests. 
Using named arguments, i.e.:: create_grant(user_id=user['id'], domain_id=domain['id'], role_id=role['id']) Or, using positional arguments, i.e.:: create_grant(role_id['id'], user['id'], None, domain_id=domain['id'], None) Or, both, i.e.:: create_grant(role_id['id'], user_id=user['id'], domain_id=domain['id']) Checking the values for kwargs is easy enough, since it comes in as a dictionary The actual method signature is :: create_grant(role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False) So, if the values of actor or target are still None after checking kwargs, we can check the positional arguments, based on the method signature. """ call_args = inspect.getcallargs( f, wrapped_self, role_id, *args, **kwargs) inherited = call_args['inherited_to_projects'] context = call_args['context'] initiator = _get_request_audit_info(context) target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER) audit_kwargs = {} if call_args['project_id']: audit_kwargs['project'] = call_args['project_id'] elif call_args['domain_id']: audit_kwargs['domain'] = call_args['domain_id'] if call_args['user_id']: audit_kwargs['user'] = call_args['user_id'] elif call_args['group_id']: audit_kwargs['group'] = call_args['group_id'] audit_kwargs['inherited_to_projects'] = inherited audit_kwargs['role'] = role_id try: result = f(wrapped_self, role_id, *args, **kwargs) except Exception: _send_audit_notification(self.action, initiator, taxonomy.OUTCOME_FAILURE, target, self.event_type, **audit_kwargs) raise else: _send_audit_notification(self.action, initiator, taxonomy.OUTCOME_SUCCESS, target, self.event_type, **audit_kwargs) return result return wrapper def send_saml_audit_notification(action, context, user_id, group_ids, identity_provider, protocol, token_id, outcome): """Send notification to inform observers about SAML events. 
:param action: Action being audited :type action: str :param context: Current request context to collect request info from :type context: dict :param user_id: User ID from Keystone token :type user_id: str :param group_ids: List of Group IDs from Keystone token :type group_ids: list :param identity_provider: ID of the IdP from the Keystone token :type identity_provider: str or None :param protocol: Protocol ID for IdP from the Keystone token :type protocol: str :param token_id: audit_id from Keystone token :type token_id: str or None :param outcome: One of :class:`pycadf.cadftaxonomy` :type outcome: str """ initiator = _get_request_audit_info(context) target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER) audit_type = SAML_AUDIT_TYPE user_id = user_id or taxonomy.UNKNOWN token_id = token_id or taxonomy.UNKNOWN group_ids = group_ids or [] cred = credential.FederatedCredential(token=token_id, type=audit_type, identity_provider=identity_provider, user=user_id, groups=group_ids) initiator.credential = cred event_type = '%s.%s' % (SERVICE, action) _send_audit_notification(action, initiator, outcome, target, event_type) def _send_audit_notification(action, initiator, outcome, target, event_type, **kwargs): """Send CADF notification to inform observers about the affected resource. This method logs an exception when sending the notification fails. :param action: CADF action being audited (e.g., 'authenticate') :param initiator: CADF resource representing the initiator :param outcome: The CADF outcome (taxonomy.OUTCOME_PENDING, taxonomy.OUTCOME_SUCCESS, taxonomy.OUTCOME_FAILURE) :param target: CADF resource representing the target :param event_type: An OpenStack-ism, typically this is the meter name that Ceilometer uses to poll events. :param kwargs: Any additional arguments passed in will be added as key-value pairs to the CADF event. 
""" if _check_notification_opt_out(event_type, outcome): return event = eventfactory.EventFactory().new_event( eventType=cadftype.EVENTTYPE_ACTIVITY, outcome=outcome, action=action, initiator=initiator, target=target, observer=resource.Resource(typeURI=taxonomy.SERVICE_SECURITY)) for key, value in kwargs.items(): setattr(event, key, value) context = {} payload = event.as_dict() notifier = _get_notifier() if notifier: try: notifier.info(context, event_type, payload) except Exception: # diaper defense: any exception that occurs while emitting the # notification should not interfere with the API request LOG.exception(_LE( 'Failed to send %(action)s %(event_type)s notification'), {'action': action, 'event_type': event_type}) def _check_notification_opt_out(event_type, outcome): """Check if a particular event_type has been opted-out of. This method checks to see if an event should be sent to the messaging service. Any event specified in the opt-out list will not be transmitted. :param event_type: This is the meter name that Ceilometer uses to poll events. For example: identity.user.created, or identity.authenticate.success, or identity.role_assignment.created :param outcome: The CADF outcome (taxonomy.OUTCOME_PENDING, taxonomy.OUTCOME_SUCCESS, taxonomy.OUTCOME_FAILURE) """ # NOTE(stevemar): Special handling for authenticate, we look at the outcome # as well when evaluating. For authN events, event_type is just # idenitity.authenticate, which isn't fine enough to provide any opt-out # value, so we attach the outcome to re-create the meter name used in # ceilometer. if 'authenticate' in event_type: event_type = event_type + "." 
+ outcome if event_type in CONF.notification_opt_out: return True return False emit_event = CadfNotificationWrapper role_assignment = CadfRoleAssignmentNotificationWrapper keystone-9.0.0/keystone/trust/0000775000567000056710000000000012701407246017547 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/trust/backends/0000775000567000056710000000000012701407246021321 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/trust/backends/__init__.py0000664000567000056710000000000012701407102023407 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/trust/backends/sql.py0000664000567000056710000002002712701407102022462 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_utils import timeutils from six.moves import range from keystone.common import sql from keystone import exception from keystone import trust # The maximum number of iterations that will be attempted for optimistic # locking on consuming a limited-use trust. 
MAXIMUM_CONSUME_ATTEMPTS = 10


class TrustModel(sql.ModelBase, sql.DictBase):
    """SQLAlchemy mapping for a delegation (trust) record."""

    __tablename__ = 'trust'
    attributes = ['id', 'trustor_user_id', 'trustee_user_id',
                  'project_id', 'impersonation', 'expires_at',
                  'remaining_uses', 'deleted_at']
    id = sql.Column(sql.String(64), primary_key=True)
    # user id of owner
    trustor_user_id = sql.Column(sql.String(64), nullable=False,)
    # user_id of user allowed to consume this preauth
    trustee_user_id = sql.Column(sql.String(64), nullable=False)
    project_id = sql.Column(sql.String(64))
    impersonation = sql.Column(sql.Boolean, nullable=False)
    deleted_at = sql.Column(sql.DateTime)
    expires_at = sql.Column(sql.DateTime)
    remaining_uses = sql.Column(sql.Integer, nullable=True)
    extra = sql.Column(sql.JsonBlob())
    __table_args__ = (sql.UniqueConstraint(
        'trustor_user_id', 'trustee_user_id', 'project_id',
        'impersonation', 'expires_at',
        name='duplicate_trust_constraint'),)


class TrustRole(sql.ModelBase):
    """Association row linking a trust to one delegated role."""

    __tablename__ = 'trust_role'
    attributes = ['trust_id', 'role_id']
    trust_id = sql.Column(sql.String(64), primary_key=True, nullable=False)
    role_id = sql.Column(sql.String(64), primary_key=True, nullable=False)


class Trust(trust.TrustDriverV8):
    """SQL backend for the trust (delegation) driver interface."""

    @sql.handle_conflicts(conflict_type='trust')
    def create_trust(self, trust_id, trust, roles):
        """Persist a new trust and its delegated roles.

        :param trust_id: ID to assign to the new trust
        :param trust: dict of trust attributes to store
        :param roles: list of role dicts (each containing an 'id' key)
        :returns: the stored trust as a dict, including a 'roles' list
        """
        with sql.session_for_write() as session:
            trust_ref = TrustModel.from_dict(trust)
            trust_ref['id'] = trust_id
            expires = trust_ref.get('expires_at')
            # Store expiry as naive UTC so comparisons against
            # timeutils.utcnow() elsewhere are consistent.
            if expires and expires.tzinfo is not None:
                trust_ref['expires_at'] = timeutils.normalize_time(expires)
            session.add(trust_ref)
            delegated_roles = []
            for role in roles:
                assignment = TrustRole()
                assignment.trust_id = trust_id
                assignment.role_id = role['id']
                delegated_roles.append({'id': role['id']})
                session.add(assignment)
            result = trust_ref.to_dict()
            result['roles'] = delegated_roles
            return result

    def _add_roles(self, trust_id, session, trust_dict):
        """Populate trust_dict['roles'] from the trust_role table.

        :param trust_id: ID of the trust whose roles are looked up
        :param session: active SQLAlchemy session to query with
        :param trust_dict: dict mutated in place to carry the roles
        """
        trust_dict['roles'] = [
            {'id': assignment.role_id}
            for assignment in
            session.query(TrustRole).filter_by(trust_id=trust_id)]
@sql.handle_conflicts(conflict_type='trust') def consume_use(self, trust_id): for attempt in range(MAXIMUM_CONSUME_ATTEMPTS): with sql.session_for_write() as session: try: query_result = (session.query(TrustModel.remaining_uses). filter_by(id=trust_id). filter_by(deleted_at=None).one()) except sql.NotFound: raise exception.TrustNotFound(trust_id=trust_id) remaining_uses = query_result.remaining_uses if remaining_uses is None: # unlimited uses, do nothing break elif remaining_uses > 0: # NOTE(morganfainberg): use an optimistic locking method # to ensure we only ever update a trust that has the # expected number of remaining uses. rows_affected = ( session.query(TrustModel). filter_by(id=trust_id). filter_by(deleted_at=None). filter_by(remaining_uses=remaining_uses). update({'remaining_uses': (remaining_uses - 1)}, synchronize_session=False)) if rows_affected == 1: # Successfully consumed a single limited-use trust. # Since trust_id is the PK on the Trust table, there is # no case we should match more than 1 row in the # update. We either update 1 row or 0 rows. break else: raise exception.TrustUseLimitReached(trust_id=trust_id) # NOTE(morganfainberg): Ensure we have a yield point for eventlet # here. This should cost us nothing otherwise. This can be removed # if/when oslo_db cleanly handles yields on db calls. time.sleep(0) else: # NOTE(morganfainberg): In the case the for loop is not prematurely # broken out of, this else block is executed. This means the trust # was not unlimited nor was it consumed (we hit the maximum # iteration limit). This is just an indicator that we were unable # to get the optimistic lock rather than silently failing or # incorrectly indicating a trust was consumed. 
raise exception.TrustConsumeMaximumAttempt(trust_id=trust_id) def get_trust(self, trust_id, deleted=False): with sql.session_for_read() as session: query = session.query(TrustModel).filter_by(id=trust_id) if not deleted: query = query.filter_by(deleted_at=None) ref = query.first() if ref is None: raise exception.TrustNotFound(trust_id=trust_id) if ref.expires_at is not None and not deleted: now = timeutils.utcnow() if now > ref.expires_at: raise exception.TrustNotFound(trust_id=trust_id) # Do not return trusts that can't be used anymore if ref.remaining_uses is not None and not deleted: if ref.remaining_uses <= 0: raise exception.TrustNotFound(trust_id=trust_id) trust_dict = ref.to_dict() self._add_roles(trust_id, session, trust_dict) return trust_dict @sql.handle_conflicts(conflict_type='trust') def list_trusts(self): with sql.session_for_read() as session: trusts = session.query(TrustModel).filter_by(deleted_at=None) return [trust_ref.to_dict() for trust_ref in trusts] @sql.handle_conflicts(conflict_type='trust') def list_trusts_for_trustee(self, trustee_user_id): with sql.session_for_read() as session: trusts = (session.query(TrustModel). filter_by(deleted_at=None). filter_by(trustee_user_id=trustee_user_id)) return [trust_ref.to_dict() for trust_ref in trusts] @sql.handle_conflicts(conflict_type='trust') def list_trusts_for_trustor(self, trustor_user_id): with sql.session_for_read() as session: trusts = (session.query(TrustModel). filter_by(deleted_at=None). 
filter_by(trustor_user_id=trustor_user_id)) return [trust_ref.to_dict() for trust_ref in trusts] @sql.handle_conflicts(conflict_type='trust') def delete_trust(self, trust_id): with sql.session_for_write() as session: trust_ref = session.query(TrustModel).get(trust_id) if not trust_ref: raise exception.TrustNotFound(trust_id=trust_id) trust_ref.deleted_at = timeutils.utcnow() keystone-9.0.0/keystone/trust/schema.py0000664000567000056710000000335112701407102021352 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import validation from keystone.common.validation import parameter_types _trust_properties = { # NOTE(lbragstad): These are set as external_id_string because they have # the ability to be read as LDAP user identifiers, which could be something # other than uuid. 'trustor_user_id': parameter_types.external_id_string, 'trustee_user_id': parameter_types.external_id_string, 'impersonation': parameter_types.boolean, 'project_id': validation.nullable(parameter_types.id_string), 'remaining_uses': { 'type': ['integer', 'null'], 'minimum': 1 }, 'expires_at': { 'type': ['null', 'string'] }, 'allow_redelegation': { 'type': ['boolean', 'null'] }, 'redelegation_count': { 'type': ['integer', 'null'], 'minimum': 0 }, # TODO(lbragstad): Need to find a better way to do this. We should be # checking that a role is a list of IDs and/or names. 
'roles': validation.add_array_type(parameter_types.id_string) } trust_create = { 'type': 'object', 'properties': _trust_properties, 'required': ['trustor_user_id', 'trustee_user_id', 'impersonation'], 'additionalProperties': True } keystone-9.0.0/keystone/trust/__init__.py0000664000567000056710000000124312701407102021647 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.trust import controllers # noqa from keystone.trust.core import * # noqa keystone-9.0.0/keystone/trust/core.py0000664000567000056710000002246412701407102021050 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Main entry point into the Trust service.""" import abc from oslo_config import cfg import six from six.moves import zip from keystone.common import dependency from keystone.common import manager from keystone import exception from keystone.i18n import _ from keystone import notifications CONF = cfg.CONF @dependency.requires('identity_api') @dependency.provider('trust_api') class Manager(manager.Manager): """Default pivot point for the Trust backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.trust' _TRUST = "OS-TRUST:trust" def __init__(self): super(Manager, self).__init__(CONF.trust.driver) @staticmethod def _validate_redelegation(redelegated_trust, trust): # Validate against: # 0 < redelegation_count <= max_redelegation_count max_redelegation_count = CONF.trust.max_redelegation_count redelegation_depth = redelegated_trust.get('redelegation_count', 0) if not (0 < redelegation_depth <= max_redelegation_count): raise exception.Forbidden( _('Remaining redelegation depth of %(redelegation_depth)d' ' out of allowed range of [0..%(max_count)d]') % {'redelegation_depth': redelegation_depth, 'max_count': max_redelegation_count}) # remaining_uses is None remaining_uses = trust.get('remaining_uses') if remaining_uses is not None: raise exception.Forbidden( _('Field "remaining_uses" is set to %(value)s' ' while it must not be set in order to redelegate a trust'), value=remaining_uses) # expiry times trust_expiry = trust.get('expires_at') redelegated_expiry = redelegated_trust['expires_at'] if trust_expiry: # redelegated trust is from backend and has no tzinfo if redelegated_expiry < trust_expiry.replace(tzinfo=None): raise exception.Forbidden( _('Requested expiration time is more ' 'than redelegated trust can provide')) else: trust['expires_at'] = redelegated_expiry # trust roles is a subset of roles of the redelegated trust parent_roles = set(role['id'] for role in 
redelegated_trust['roles']) if not all(role['id'] in parent_roles for role in trust['roles']): raise exception.Forbidden( _('Some of requested roles are not in redelegated trust')) def get_trust_pedigree(self, trust_id): trust = self.driver.get_trust(trust_id) trust_chain = [trust] while trust and trust.get('redelegated_trust_id'): trust = self.driver.get_trust(trust['redelegated_trust_id']) trust_chain.append(trust) return trust_chain def get_trust(self, trust_id, deleted=False): trust = self.driver.get_trust(trust_id, deleted) if trust and trust.get('redelegated_trust_id') and not deleted: trust_chain = self.get_trust_pedigree(trust_id) for parent, child in zip(trust_chain[1:], trust_chain): self._validate_redelegation(parent, child) try: self.identity_api.assert_user_enabled( parent['trustee_user_id']) except (AssertionError, exception.NotFound): raise exception.Forbidden( _('One of the trust agents is disabled or deleted')) return trust def create_trust(self, trust_id, trust, roles, redelegated_trust=None, initiator=None): """Create a new trust. 
:returns: a new trust """ # Default for initial trust in chain is max_redelegation_count max_redelegation_count = CONF.trust.max_redelegation_count requested_count = trust.get('redelegation_count') redelegatable = (trust.pop('allow_redelegation', False) and requested_count != 0) if not redelegatable: trust['redelegation_count'] = requested_count = 0 remaining_uses = trust.get('remaining_uses') if remaining_uses is not None and remaining_uses <= 0: msg = _('remaining_uses must be a positive integer or null.') raise exception.ValidationError(msg) else: # Validate requested redelegation depth if requested_count and requested_count > max_redelegation_count: raise exception.Forbidden( _('Requested redelegation depth of %(requested_count)d ' 'is greater than allowed %(max_count)d') % {'requested_count': requested_count, 'max_count': max_redelegation_count}) # Decline remaining_uses if trust.get('remaining_uses') is not None: raise exception.ValidationError( _('remaining_uses must not be set if redelegation is ' 'allowed')) if redelegated_trust: trust['redelegated_trust_id'] = redelegated_trust['id'] remaining_count = redelegated_trust['redelegation_count'] - 1 # Validate depth consistency if (redelegatable and requested_count and requested_count != remaining_count): msg = _('Modifying "redelegation_count" upon redelegation is ' 'forbidden. Omitting this parameter is advised.') raise exception.Forbidden(msg) trust.setdefault('redelegation_count', remaining_count) # Check entire trust pedigree validity pedigree = self.get_trust_pedigree(redelegated_trust['id']) for t in pedigree: self._validate_redelegation(t, trust) trust.setdefault('redelegation_count', max_redelegation_count) ref = self.driver.create_trust(trust_id, trust, roles) notifications.Audit.created(self._TRUST, trust_id, initiator=initiator) return ref def delete_trust(self, trust_id, initiator=None): """Remove a trust. :raises keystone.exception.TrustNotFound: If the trust doesn't exist. 
Recursively remove given and redelegated trusts """ trust = self.driver.get_trust(trust_id) trusts = self.driver.list_trusts_for_trustor( trust['trustor_user_id']) for t in trusts: if t.get('redelegated_trust_id') == trust_id: # recursive call to make sure all notifications are sent try: self.delete_trust(t['id']) except exception.TrustNotFound: # nosec # if trust was deleted by concurrent process # consistency must not suffer pass # end recursion self.driver.delete_trust(trust_id) notifications.Audit.deleted(self._TRUST, trust_id, initiator) @six.add_metaclass(abc.ABCMeta) class TrustDriverV8(object): @abc.abstractmethod def create_trust(self, trust_id, trust, roles): """Create a new trust. :returns: a new trust """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_trust(self, trust_id, deleted=False): """Get a trust by the trust id. :param trust_id: the trust identifier :type trust_id: string :param deleted: return the trust even if it is deleted, expired, or has no consumptions left :type deleted: bool """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_trusts(self): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_trusts_for_trustee(self, trustee): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_trusts_for_trustor(self, trustor): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_trust(self, trust_id): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def consume_use(self, trust_id): """Consume one use of a trust. One use of a trust is consumed when the trust was created with a limitation on its uses, provided there are still uses available. :raises keystone.exception.TrustUseLimitReached: If no remaining uses for trust. :raises keystone.exception.TrustNotFound: If the trust doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover Driver = manager.create_legacy_driver(TrustDriverV8) keystone-9.0.0/keystone/trust/controllers.py0000664000567000056710000002576212701407105022475 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_utils import timeutils import six from keystone import assignment from keystone.common import controller from keystone.common import dependency from keystone.common import utils from keystone.common import validation from keystone import exception from keystone.i18n import _ from keystone import notifications from keystone.trust import schema def _trustor_trustee_only(trust, user_id): if (user_id != trust.get('trustee_user_id') and user_id != trust.get('trustor_user_id')): raise exception.Forbidden() def _admin_trustor_only(context, trust, user_id): if user_id != trust.get('trustor_user_id') and not context['is_admin']: raise exception.Forbidden() @dependency.requires('assignment_api', 'identity_api', 'resource_api', 'role_api', 'token_provider_api', 'trust_api') class TrustV3(controller.V3Controller): collection_name = "trusts" member_name = "trust" @classmethod def base_url(cls, context, path=None): """Construct a path and pass it to V3Controller.base_url method.""" # NOTE(stevemar): Overriding path to /OS-TRUST/trusts so that # V3Controller.base_url handles setting the self link correctly. 
path = '/OS-TRUST/' + cls.collection_name return super(TrustV3, cls).base_url(context, path=path) def _get_user_id(self, context): try: token_ref = utils.get_token_ref(context) except exception.Unauthorized: return None return token_ref.user_id def get_trust(self, context, trust_id): user_id = self._get_user_id(context) trust = self.trust_api.get_trust(trust_id) _trustor_trustee_only(trust, user_id) self._fill_in_roles(context, trust, self.role_api.list_roles()) return TrustV3.wrap_member(context, trust) def _fill_in_roles(self, context, trust, all_roles): if trust.get('expires_at') is not None: trust['expires_at'] = (utils.isotime (trust['expires_at'], subsecond=True)) if 'roles' not in trust: trust['roles'] = [] trust_full_roles = [] for trust_role in trust['roles']: if isinstance(trust_role, six.string_types): trust_role = {'id': trust_role} matching_roles = [x for x in all_roles if x['id'] == trust_role['id']] if matching_roles: full_role = assignment.controllers.RoleV3.wrap_member( context, matching_roles[0])['role'] trust_full_roles.append(full_role) trust['roles'] = trust_full_roles trust['roles_links'] = { 'self': (self.base_url(context) + "/%s/roles" % trust['id']), 'next': None, 'previous': None} def _normalize_role_list(self, trust, all_roles): trust_roles = [] all_role_names = {r['name']: r for r in all_roles} for role in trust.get('roles', []): if 'id' in role: trust_roles.append({'id': role['id']}) elif 'name' in role: rolename = role['name'] if rolename in all_role_names: trust_roles.append({'id': all_role_names[rolename]['id']}) else: raise exception.RoleNotFound(_("role %s is not defined") % rolename) else: raise exception.ValidationError(attribute='id or name', target='roles') return trust_roles @controller.protected() @validation.validated(schema.trust_create, 'trust') def create_trust(self, context, trust): """Create a new trust. The user creating the trust must be the trustor. 
""" auth_context = context.get('environment', {}).get('KEYSTONE_AUTH_CONTEXT', {}) # Check if delegated via trust if auth_context.get('is_delegated_auth'): # Redelegation case src_trust_id = auth_context['trust_id'] if not src_trust_id: raise exception.Forbidden( _('Redelegation allowed for delegated by trust only')) redelegated_trust = self.trust_api.get_trust(src_trust_id) else: redelegated_trust = None if trust.get('project_id'): self._require_role(trust) self._require_user_is_trustor(context, trust) self._require_trustee_exists(trust['trustee_user_id']) all_roles = self.role_api.list_roles() # Normalize roles normalized_roles = self._normalize_role_list(trust, all_roles) trust['roles'] = normalized_roles self._require_trustor_has_role_in_project(trust) trust['expires_at'] = self._parse_expiration_date( trust.get('expires_at')) trust_id = uuid.uuid4().hex initiator = notifications._get_request_audit_info(context) new_trust = self.trust_api.create_trust(trust_id, trust, normalized_roles, redelegated_trust, initiator) self._fill_in_roles(context, new_trust, all_roles) return TrustV3.wrap_member(context, new_trust) def _require_trustee_exists(self, trustee_user_id): self.identity_api.get_user(trustee_user_id) def _require_user_is_trustor(self, context, trust): user_id = self._get_user_id(context) if user_id != trust.get('trustor_user_id'): raise exception.Forbidden( _("The authenticated user should match the trustor.")) def _require_role(self, trust): if not trust.get('roles'): raise exception.Forbidden( _('At least one role should be specified.')) def _get_trustor_roles(self, trust): original_trust = trust.copy() while original_trust.get('redelegated_trust_id'): original_trust = self.trust_api.get_trust( original_trust['redelegated_trust_id']) if not self._attribute_is_empty(trust, 'project_id'): self.resource_api.get_project(original_trust['project_id']) # Get a list of roles including any domain specific roles assignment_list = 
self.assignment_api.list_role_assignments( user_id=original_trust['trustor_user_id'], project_id=original_trust['project_id'], effective=True, strip_domain_roles=False) return list(set([x['role_id'] for x in assignment_list])) else: return [] def _require_trustor_has_role_in_project(self, trust): trustor_roles = self._get_trustor_roles(trust) for trust_role in trust['roles']: matching_roles = [x for x in trustor_roles if x == trust_role['id']] if not matching_roles: raise exception.RoleNotFound(role_id=trust_role['id']) def _parse_expiration_date(self, expiration_date): if expiration_date is None: return None if not expiration_date.endswith('Z'): expiration_date += 'Z' try: expiration_time = timeutils.parse_isotime(expiration_date) except ValueError: raise exception.ValidationTimeStampError() if timeutils.is_older_than(expiration_time, 0): raise exception.ValidationExpirationError() return expiration_time def _check_role_for_trust(self, context, trust_id, role_id): """Checks if a role has been assigned to a trust.""" trust = self.trust_api.get_trust(trust_id) user_id = self._get_user_id(context) _trustor_trustee_only(trust, user_id) if not any(role['id'] == role_id for role in trust['roles']): raise exception.RoleNotFound(role_id=role_id) @controller.protected() def list_trusts(self, context): query = context['query_string'] trusts = [] if not query: self.assert_admin(context) trusts += self.trust_api.list_trusts() if 'trustor_user_id' in query: user_id = query['trustor_user_id'] calling_user_id = self._get_user_id(context) if user_id != calling_user_id: raise exception.Forbidden() trusts += (self.trust_api. 
list_trusts_for_trustor(user_id)) if 'trustee_user_id' in query: user_id = query['trustee_user_id'] calling_user_id = self._get_user_id(context) if user_id != calling_user_id: raise exception.Forbidden() trusts += self.trust_api.list_trusts_for_trustee(user_id) for trust in trusts: # get_trust returns roles, list_trusts does not # It seems in some circumstances, roles does not # exist in the query response, so check first if 'roles' in trust: del trust['roles'] if trust.get('expires_at') is not None: trust['expires_at'] = (utils.isotime (trust['expires_at'], subsecond=True)) return TrustV3.wrap_collection(context, trusts) @controller.protected() def delete_trust(self, context, trust_id): trust = self.trust_api.get_trust(trust_id) user_id = self._get_user_id(context) _admin_trustor_only(context, trust, user_id) initiator = notifications._get_request_audit_info(context) self.trust_api.delete_trust(trust_id, initiator) @controller.protected() def list_roles_for_trust(self, context, trust_id): trust = self.get_trust(context, trust_id)['trust'] user_id = self._get_user_id(context) _trustor_trustee_only(trust, user_id) return {'roles': trust['roles'], 'links': trust['roles_links']} @controller.protected() def get_role_for_trust(self, context, trust_id, role_id): """Get a role that has been assigned to a trust.""" self._check_role_for_trust(context, trust_id, role_id) role = self.role_api.get_role(role_id) return assignment.controllers.RoleV3.wrap_member(context, role) keystone-9.0.0/keystone/trust/routers.py0000664000567000056710000000470412701407102021620 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """WSGI Routers for the Trust service.""" import functools from keystone.common import json_home from keystone.common import wsgi from keystone.trust import controllers _build_resource_relation = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-TRUST', extension_version='1.0') TRUST_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation( 'OS-TRUST', '1.0', 'trust_id') class Routers(wsgi.RoutersBase): def append_v3_routers(self, mapper, routers): trust_controller = controllers.TrustV3() self._add_resource( mapper, trust_controller, path='/OS-TRUST/trusts', get_action='list_trusts', post_action='create_trust', rel=_build_resource_relation(resource_name='trusts')) self._add_resource( mapper, trust_controller, path='/OS-TRUST/trusts/{trust_id}', get_action='get_trust', delete_action='delete_trust', rel=_build_resource_relation(resource_name='trust'), path_vars={ 'trust_id': TRUST_ID_PARAMETER_RELATION, }) self._add_resource( mapper, trust_controller, path='/OS-TRUST/trusts/{trust_id}/roles', get_action='list_roles_for_trust', rel=_build_resource_relation(resource_name='trust_roles'), path_vars={ 'trust_id': TRUST_ID_PARAMETER_RELATION, }) self._add_resource( mapper, trust_controller, path='/OS-TRUST/trusts/{trust_id}/roles/{role_id}', get_head_action='get_role_for_trust', rel=_build_resource_relation(resource_name='trust_role'), path_vars={ 'trust_id': TRUST_ID_PARAMETER_RELATION, 'role_id': json_home.Parameters.ROLE_ID, }) keystone-9.0.0/keystone/contrib/0000775000567000056710000000000012701407246020026 5ustar 
jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/ec2/0000775000567000056710000000000012701407246020477 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/ec2/__init__.py0000664000567000056710000000145512701407102022604 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.contrib.ec2 import controllers # noqa from keystone.contrib.ec2.core import * # noqa from keystone.contrib.ec2.routers import Ec2Extension # noqa from keystone.contrib.ec2.routers import Ec2ExtensionV3 # noqa keystone-9.0.0/keystone/contrib/ec2/core.py0000664000567000056710000000236012701407102021771 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import extension EXTENSION_DATA = { 'name': 'OpenStack EC2 API', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-EC2/v1.0', 'alias': 'OS-EC2', 'updated': '2013-07-07T12:00:0-00:00', 'description': 'OpenStack EC2 Credentials backend.', 'links': [ { 'rel': 'describedby', 'type': 'text/html', 'href': 'http://developer.openstack.org/' 'api-ref-identity-v2-ext.html', } ]} extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA) extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA) keystone-9.0.0/keystone/contrib/ec2/controllers.py0000664000567000056710000004431512701407102023415 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the EC2 Credentials service. This service allows the creation of access/secret credentials used for the ec2 interop layer of OpenStack. A user can create as many access/secret pairs, each of which is mapped to a specific project. This is required because OpenStack supports a user belonging to multiple projects, whereas the signatures created on ec2-style requests don't allow specification of which project the user wishes to act upon. To complete the cycle, we provide a method that OpenStack services can use to validate a signature and get a corresponding OpenStack token. This token allows method calls to other services within the context the access/secret was created. 
As an example, Nova requests Keystone to validate the signature of a request, receives a token, and then makes a request to Glance to list images needed to perform the requested task. """ import abc import sys import uuid from keystoneclient.contrib.ec2 import utils as ec2_utils from oslo_serialization import jsonutils import six from keystone.common import controller from keystone.common import dependency from keystone.common import utils from keystone.common import wsgi from keystone import exception from keystone.i18n import _ CRED_TYPE_EC2 = 'ec2' @dependency.requires('assignment_api', 'catalog_api', 'credential_api', 'identity_api', 'resource_api', 'role_api', 'token_provider_api') @six.add_metaclass(abc.ABCMeta) class Ec2ControllerCommon(object): def check_signature(self, creds_ref, credentials): signer = ec2_utils.Ec2Signer(creds_ref['secret']) signature = signer.generate(credentials) # NOTE(davechen): credentials.get('signature') is not guaranteed to # exist, we need check it explicitly. if credentials.get('signature'): if utils.auth_str_equal(credentials['signature'], signature): return True # NOTE(vish): Some client libraries don't use the port when signing # requests, so try again without port. elif ':' in credentials['host']: hostname, _port = credentials['host'].split(':') credentials['host'] = hostname # NOTE(davechen): we need reinitialize 'signer' to avoid # contaminated status of signature, this is similar with # other programming language libraries, JAVA for example. 
signer = ec2_utils.Ec2Signer(creds_ref['secret']) signature = signer.generate(credentials) if utils.auth_str_equal(credentials['signature'], signature): return True raise exception.Unauthorized( message=_('Invalid EC2 signature.')) else: raise exception.Unauthorized( message=_('EC2 signature not supplied.')) # Raise the exception when credentials.get('signature') is None else: raise exception.Unauthorized( message=_('EC2 signature not supplied.')) @abc.abstractmethod def authenticate(self, context, credentials=None, ec2Credentials=None): """Validate a signed EC2 request and provide a token. Other services (such as Nova) use this **admin** call to determine if a request they signed received is from a valid user. If it is a valid signature, an OpenStack token that maps to the user/tenant is returned to the caller, along with all the other details returned from a normal token validation call. The returned token is useful for making calls to other OpenStack services within the context of the request. :param context: standard context :param credentials: dict of ec2 signature :param ec2Credentials: DEPRECATED dict of ec2 signature :returns: token: OpenStack token equivalent to access key along with the corresponding service catalog and roles """ raise exception.NotImplemented() def _authenticate(self, credentials=None, ec2credentials=None): """Common code shared between the V2 and V3 authenticate methods. :returns: user_ref, tenant_ref, metadata_ref, roles_ref, catalog_ref """ # FIXME(ja): validate that a service token was used! 
# NOTE(termie): backwards compat hack if not credentials and ec2credentials: credentials = ec2credentials if 'access' not in credentials: raise exception.Unauthorized( message=_('EC2 signature not supplied.')) creds_ref = self._get_credentials(credentials['access']) self.check_signature(creds_ref, credentials) # TODO(termie): don't create new tokens every time # TODO(termie): this is copied from TokenController.authenticate tenant_ref = self.resource_api.get_project(creds_ref['tenant_id']) user_ref = self.identity_api.get_user(creds_ref['user_id']) metadata_ref = {} metadata_ref['roles'] = ( self.assignment_api.get_roles_for_user_and_project( user_ref['id'], tenant_ref['id'])) trust_id = creds_ref.get('trust_id') if trust_id: metadata_ref['trust_id'] = trust_id metadata_ref['trustee_user_id'] = user_ref['id'] # Validate that the auth info is valid and nothing is disabled try: self.identity_api.assert_user_enabled( user_id=user_ref['id'], user=user_ref) self.resource_api.assert_domain_enabled( domain_id=user_ref['domain_id']) self.resource_api.assert_project_enabled( project_id=tenant_ref['id'], project=tenant_ref) except AssertionError as e: six.reraise(exception.Unauthorized, exception.Unauthorized(e), sys.exc_info()[2]) roles = metadata_ref.get('roles', []) if not roles: raise exception.Unauthorized( message=_('User not valid for tenant.')) roles_ref = [self.role_api.get_role(role_id) for role_id in roles] catalog_ref = self.catalog_api.get_catalog( user_ref['id'], tenant_ref['id']) return user_ref, tenant_ref, metadata_ref, roles_ref, catalog_ref def create_credential(self, context, user_id, tenant_id): """Create a secret/access pair for use with ec2 style auth. Generates a new set of credentials that map the user/tenant pair. 
:param context: standard context :param user_id: id of user :param tenant_id: id of tenant :returns: credential: dict of ec2 credential """ self.identity_api.get_user(user_id) self.resource_api.get_project(tenant_id) trust_id = self._get_trust_id_for_request(context) blob = {'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex, 'trust_id': trust_id} credential_id = utils.hash_access_key(blob['access']) cred_ref = {'user_id': user_id, 'project_id': tenant_id, 'blob': jsonutils.dumps(blob), 'id': credential_id, 'type': CRED_TYPE_EC2} self.credential_api.create_credential(credential_id, cred_ref) return {'credential': self._convert_v3_to_ec2_credential(cred_ref)} def get_credentials(self, user_id): """List all credentials for a user. :param user_id: id of user :returns: credentials: list of ec2 credential dicts """ self.identity_api.get_user(user_id) credential_refs = self.credential_api.list_credentials_for_user( user_id, type=CRED_TYPE_EC2) return {'credentials': [self._convert_v3_to_ec2_credential(credential) for credential in credential_refs]} def get_credential(self, user_id, credential_id): """Retrieve a user's access/secret pair by the access key. Grab the full access/secret pair for a given access key. :param user_id: id of user :param credential_id: access key for credentials :returns: credential: dict of ec2 credential """ self.identity_api.get_user(user_id) return {'credential': self._get_credentials(credential_id)} def delete_credential(self, user_id, credential_id): """Delete a user's access/secret pair. 
Used to revoke a user's access/secret pair :param user_id: id of user :param credential_id: access key for credentials :returns: bool: success """ self.identity_api.get_user(user_id) self._get_credentials(credential_id) ec2_credential_id = utils.hash_access_key(credential_id) return self.credential_api.delete_credential(ec2_credential_id) @staticmethod def _convert_v3_to_ec2_credential(credential): # Prior to bug #1259584 fix, blob was stored unserialized # but it should be stored as a json string for compatibility # with the v3 credentials API. Fall back to the old behavior # for backwards compatibility with existing DB contents try: blob = jsonutils.loads(credential['blob']) except TypeError: blob = credential['blob'] return {'user_id': credential.get('user_id'), 'tenant_id': credential.get('project_id'), 'access': blob.get('access'), 'secret': blob.get('secret'), 'trust_id': blob.get('trust_id')} def _get_credentials(self, credential_id): """Return credentials from an ID. :param credential_id: id of credential :raises keystone.exception.Unauthorized: when credential id is invalid or when the credential type is not ec2 :returns: credential: dict of ec2 credential. """ ec2_credential_id = utils.hash_access_key(credential_id) cred = self.credential_api.get_credential(ec2_credential_id) if not cred or cred['type'] != CRED_TYPE_EC2: raise exception.Unauthorized( message=_('EC2 access key not found.')) return self._convert_v3_to_ec2_credential(cred) @dependency.requires('policy_api', 'token_provider_api') class Ec2Controller(Ec2ControllerCommon, controller.V2Controller): @controller.v2_ec2_deprecated def authenticate(self, context, credentials=None, ec2Credentials=None): (user_ref, tenant_ref, metadata_ref, roles_ref, catalog_ref) = self._authenticate(credentials=credentials, ec2credentials=ec2Credentials) # NOTE(morganfainberg): Make sure the data is in correct form since it # might be consumed external to Keystone and this is a v2.0 controller. 
# The token provider does not explicitly care about user_ref version # in this case, but the data is stored in the token itself and should # match the version user_ref = self.v3_to_v2_user(user_ref) auth_token_data = dict(user=user_ref, tenant=tenant_ref, metadata=metadata_ref, id='placeholder') (token_id, token_data) = self.token_provider_api.issue_v2_token( auth_token_data, roles_ref, catalog_ref) return token_data @controller.v2_ec2_deprecated def get_credential(self, context, user_id, credential_id): if not self._is_admin(context): self._assert_identity(context, user_id) return super(Ec2Controller, self).get_credential(user_id, credential_id) @controller.v2_ec2_deprecated def get_credentials(self, context, user_id): if not self._is_admin(context): self._assert_identity(context, user_id) return super(Ec2Controller, self).get_credentials(user_id) @controller.v2_ec2_deprecated def create_credential(self, context, user_id, tenant_id): if not self._is_admin(context): self._assert_identity(context, user_id) return super(Ec2Controller, self).create_credential(context, user_id, tenant_id) @controller.v2_ec2_deprecated def delete_credential(self, context, user_id, credential_id): if not self._is_admin(context): self._assert_identity(context, user_id) self._assert_owner(user_id, credential_id) return super(Ec2Controller, self).delete_credential(user_id, credential_id) def _assert_identity(self, context, user_id): """Check that the provided token belongs to the user. :param context: standard context :param user_id: id of user :raises keystone.exception.Forbidden: when token is invalid """ token_ref = utils.get_token_ref(context) if token_ref.user_id != user_id: raise exception.Forbidden(_('Token belongs to another user')) def _is_admin(self, context): """Wrap admin assertion error return statement. :param context: standard context :returns: bool: success """ try: # NOTE(morganfainberg): policy_api is required for assert_admin # to properly perform policy enforcement. 
self.assert_admin(context) return True except (exception.Forbidden, exception.Unauthorized): return False def _assert_owner(self, user_id, credential_id): """Ensure the provided user owns the credential. :param user_id: expected credential owner :param credential_id: id of credential object :raises keystone.exception.Forbidden: on failure """ ec2_credential_id = utils.hash_access_key(credential_id) cred_ref = self.credential_api.get_credential(ec2_credential_id) if user_id != cred_ref['user_id']: raise exception.Forbidden(_('Credential belongs to another user')) @dependency.requires('policy_api', 'token_provider_api') class Ec2ControllerV3(Ec2ControllerCommon, controller.V3Controller): collection_name = 'credentials' member_name = 'credential' def __init__(self): super(Ec2ControllerV3, self).__init__() def _check_credential_owner_and_user_id_match(self, context, prep_info, user_id, credential_id): # NOTE(morganfainberg): this method needs to capture the arguments of # the method that is decorated with @controller.protected() (with # exception of the first argument ('context') since the protected # method passes in *args, **kwargs. In this case, it is easier to see # the expected input if the argspec is `user_id` and `credential_id` # explicitly (matching the :class:`.ec2_delete_credential()` method # below). ref = {} credential_id = utils.hash_access_key(credential_id) ref['credential'] = self.credential_api.get_credential(credential_id) # NOTE(morganfainberg): policy_api is required for this # check_protection to properly be able to perform policy enforcement. 
self.check_protection(context, prep_info, ref) def authenticate(self, context, credentials=None, ec2Credentials=None): (user_ref, project_ref, metadata_ref, roles_ref, catalog_ref) = self._authenticate(credentials=credentials, ec2credentials=ec2Credentials) method_names = ['ec2credential'] token_id, token_data = self.token_provider_api.issue_v3_token( user_ref['id'], method_names, project_id=project_ref['id'], metadata_ref=metadata_ref) return render_token_data_response(token_id, token_data) @controller.protected(callback=_check_credential_owner_and_user_id_match) def ec2_get_credential(self, context, user_id, credential_id): ref = super(Ec2ControllerV3, self).get_credential(user_id, credential_id) return Ec2ControllerV3.wrap_member(context, ref['credential']) @controller.protected() def ec2_list_credentials(self, context, user_id): refs = super(Ec2ControllerV3, self).get_credentials(user_id) return Ec2ControllerV3.wrap_collection(context, refs['credentials']) @controller.protected() def ec2_create_credential(self, context, user_id, tenant_id): ref = super(Ec2ControllerV3, self).create_credential(context, user_id, tenant_id) return Ec2ControllerV3.wrap_member(context, ref['credential']) @controller.protected(callback=_check_credential_owner_and_user_id_match) def ec2_delete_credential(self, context, user_id, credential_id): return super(Ec2ControllerV3, self).delete_credential(user_id, credential_id) @classmethod def _add_self_referential_link(cls, context, ref): path = '/users/%(user_id)s/credentials/OS-EC2/%(credential_id)s' url = cls.base_url(context, path) % { 'user_id': ref['user_id'], 'credential_id': ref['access']} ref.setdefault('links', {}) ref['links']['self'] = url def render_token_data_response(token_id, token_data): """Render token data HTTP response. Stash token ID into the X-Subject-Token header. 
""" headers = [('X-Subject-Token', token_id)] return wsgi.render_response(body=token_data, status=(200, 'OK'), headers=headers) keystone-9.0.0/keystone/contrib/ec2/routers.py0000664000567000056710000000635612701407102022555 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from keystone.common import json_home from keystone.common import wsgi from keystone.contrib.ec2 import controllers build_resource_relation = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-EC2', extension_version='1.0') class Ec2Extension(wsgi.ExtensionRouter): def add_routes(self, mapper): ec2_controller = controllers.Ec2Controller() # validation mapper.connect( '/ec2tokens', controller=ec2_controller, action='authenticate', conditions=dict(method=['POST'])) # crud mapper.connect( '/users/{user_id}/credentials/OS-EC2', controller=ec2_controller, action='create_credential', conditions=dict(method=['POST'])) mapper.connect( '/users/{user_id}/credentials/OS-EC2', controller=ec2_controller, action='get_credentials', conditions=dict(method=['GET'])) mapper.connect( '/users/{user_id}/credentials/OS-EC2/{credential_id}', controller=ec2_controller, action='get_credential', conditions=dict(method=['GET'])) mapper.connect( '/users/{user_id}/credentials/OS-EC2/{credential_id}', controller=ec2_controller, action='delete_credential', conditions=dict(method=['DELETE'])) class 
Ec2ExtensionV3(wsgi.V3ExtensionRouter): def add_routes(self, mapper): ec2_controller = controllers.Ec2ControllerV3() # validation self._add_resource( mapper, ec2_controller, path='/ec2tokens', post_action='authenticate', rel=build_resource_relation(resource_name='ec2tokens')) # crud self._add_resource( mapper, ec2_controller, path='/users/{user_id}/credentials/OS-EC2', get_action='ec2_list_credentials', post_action='ec2_create_credential', rel=build_resource_relation(resource_name='user_credentials'), path_vars={ 'user_id': json_home.Parameters.USER_ID, }) self._add_resource( mapper, ec2_controller, path='/users/{user_id}/credentials/OS-EC2/{credential_id}', get_action='ec2_get_credential', delete_action='ec2_delete_credential', rel=build_resource_relation(resource_name='user_credential'), path_vars={ 'credential_id': json_home.build_v3_parameter_relation('credential_id'), 'user_id': json_home.Parameters.USER_ID, }) keystone-9.0.0/keystone/contrib/__init__.py0000664000567000056710000000000012701407102022114 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/federation/0000775000567000056710000000000012701407246022146 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/federation/backends/0000775000567000056710000000000012701407246023720 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/federation/backends/__init__.py0000664000567000056710000000000012701407102026006 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/federation/backends/sql.py0000664000567000056710000000200112701407102025051 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from keystone.federation.backends import sql _OLD = "keystone.contrib.federation.backends.sql.Federation" _NEW = "sql" class Federation(sql.Federation): @versionutils.deprecated(versionutils.deprecated.MITAKA, in_favor_of=_NEW, what=_OLD) def __init__(self, *args, **kwargs): super(Federation, self).__init__(*args, **kwargs) keystone-9.0.0/keystone/contrib/federation/migrate_repo/0000775000567000056710000000000012701407246024623 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/federation/migrate_repo/versions/0000775000567000056710000000000012701407246026473 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py0000664000567000056710000000124212701407102034750 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='federation') keystone-9.0.0/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py0000664000567000056710000000133112701407102034553 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis.inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='federation') keystone-9.0.0/keystone/contrib/federation/migrate_repo/versions/__init__.py0000664000567000056710000000000012701407102030561 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/federation/migrate_repo/versions/008_add_relay_state_to_sp.py0000664000567000056710000000124212701407102033752 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='federation') keystone-9.0.0/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py0000664000567000056710000000124212701407102033552 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='federation') ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000keystone-9.0.0/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.pykeystone-9.0.0/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attribut0000664000567000056710000000124212701407102035474 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='federation') keystone-9.0.0/keystone/contrib/federation/migrate_repo/versions/007_add_remote_id_table.py0000664000567000056710000000124212701407102033347 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='federation') keystone-9.0.0/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py0000664000567000056710000000124212701407102033211 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='federation') keystone-9.0.0/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py0000664000567000056710000000124212701407102035135 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='federation') keystone-9.0.0/keystone/contrib/federation/migrate_repo/__init__.py0000664000567000056710000000000012701407102026711 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/federation/migrate_repo/migrate.cfg0000664000567000056710000000231612701407102026725 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=federation # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. 
# Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] # When creating new change scripts, Migrate will stamp the new script with # a version number. By default this is latest_version + 1. You can set this # to 'true' to tell Migrate to use the UTC timestamp instead. use_timestamp_numbering=False keystone-9.0.0/keystone/contrib/federation/__init__.py0000664000567000056710000000000012701407102024234 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/federation/routers.py0000664000567000056710000000226212701407102024214 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_log import versionutils from keystone.common import wsgi from keystone.i18n import _ LOG = log.getLogger(__name__) class FederationExtension(wsgi.Middleware): def __init__(self, *args, **kwargs): super(FederationExtension, self).__init__(*args, **kwargs) msg = _("Remove federation_extension from the paste pipeline, the " "federation extension is now always available. 
Update the " "[pipeline:api_v3] section in keystone-paste.ini accordingly, " "as it will be removed in the O release.") versionutils.report_deprecated_feature(LOG, msg) keystone-9.0.0/keystone/contrib/s3/0000775000567000056710000000000012701407246020353 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/s3/__init__.py0000664000567000056710000000117112701407102022453 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.contrib.s3.core import * # noqa keystone-9.0.0/keystone/contrib/s3/core.py0000664000567000056710000001122012701407102021640 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the S3 Credentials service. This service provides S3 token validation for services configured with the s3_token middleware to authorize S3 requests. This service uses the same credentials used by EC2. 
Refer to the documentation for the EC2 module for how to generate the required credentials. """ import base64 import hashlib import hmac import six from keystone.common import extension from keystone.common import json_home from keystone.common import utils from keystone.common import wsgi from keystone.contrib.ec2 import controllers from keystone import exception from keystone.i18n import _ EXTENSION_DATA = { 'name': 'OpenStack S3 API', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 's3tokens/v1.0', 'alias': 's3tokens', 'updated': '2013-07-07T12:00:0-00:00', 'description': 'OpenStack S3 API.', 'links': [ { 'rel': 'describedby', 'type': 'text/html', 'href': 'http://developer.openstack.org/' 'api-ref-identity-v2-ext.html', } ]} extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA) class S3Extension(wsgi.V3ExtensionRouter): def add_routes(self, mapper): controller = S3Controller() # validation self._add_resource( mapper, controller, path='/s3tokens', post_action='authenticate', rel=json_home.build_v3_extension_resource_relation( 's3tokens', '1.0', 's3tokens')) class S3Controller(controllers.Ec2Controller): def check_signature(self, creds_ref, credentials): string_to_sign = base64.urlsafe_b64decode(str(credentials['token'])) if string_to_sign[0:4] != b'AWS4': signature = self._calculate_signature_v1(string_to_sign, creds_ref['secret']) else: signature = self._calculate_signature_v4(string_to_sign, creds_ref['secret']) if not utils.auth_str_equal(credentials['signature'], signature): raise exception.Unauthorized( message=_('Credential signature mismatch')) def _calculate_signature_v1(self, string_to_sign, secret_key): """Calculates a v1 signature. 
:param bytes string_to_sign: String that contains request params and is used for calculate signature of request :param text secret_key: Second auth key of EC2 account that is used to sign requests """ key = str(secret_key).encode('utf-8') if six.PY2: b64_encode = base64.encodestring else: b64_encode = base64.encodebytes signed = b64_encode(hmac.new(key, string_to_sign, hashlib.sha1) .digest()).decode('utf-8').strip() return signed def _calculate_signature_v4(self, string_to_sign, secret_key): """Calculates a v4 signature. :param bytes string_to_sign: String that contains request params and is used for calculate signature of request :param text secret_key: Second auth key of EC2 account that is used to sign requests """ parts = string_to_sign.split(b'\n') if len(parts) != 4 or parts[0] != b'AWS4-HMAC-SHA256': raise exception.Unauthorized(message=_('Invalid EC2 signature.')) scope = parts[2].split(b'/') if len(scope) != 4 or scope[2] != b's3' or scope[3] != b'aws4_request': raise exception.Unauthorized(message=_('Invalid EC2 signature.')) def _sign(key, msg): return hmac.new(key, msg, hashlib.sha256).digest() signed = _sign(('AWS4' + secret_key).encode('utf-8'), scope[0]) signed = _sign(signed, scope[1]) signed = _sign(signed, scope[2]) signed = _sign(signed, b'aws4_request') signature = hmac.new(signed, string_to_sign, hashlib.sha256) return signature.hexdigest() keystone-9.0.0/keystone/contrib/oauth1/0000775000567000056710000000000012701407246021227 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/oauth1/backends/0000775000567000056710000000000012701407246023001 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/oauth1/backends/__init__.py0000664000567000056710000000000012701407102025067 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/oauth1/backends/sql.py0000664000567000056710000000175212701407102024146 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from keystone.oauth1.backends import sql _OLD = "keystone.contrib.oauth1.backends.sql.OAuth1" _NEW = "sql" class OAuth1(sql.OAuth1): @versionutils.deprecated(versionutils.deprecated.MITAKA, in_favor_of=_NEW, what=_OLD) def __init__(self, *args, **kwargs): super(OAuth1, self).__init__(*args, **kwargs) keystone-9.0.0/keystone/contrib/oauth1/migrate_repo/0000775000567000056710000000000012701407246023704 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/oauth1/migrate_repo/versions/0000775000567000056710000000000012701407246025554 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py0000664000567000056710000000130612701407102032476 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='oauth1') keystone-9.0.0/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py0000664000567000056710000000130612701407102031757 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='oauth1') keystone-9.0.0/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py0000664000567000056710000000130612701407102034452 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='oauth1') keystone-9.0.0/keystone/contrib/oauth1/migrate_repo/versions/__init__.py0000664000567000056710000000000012701407102027642 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py0000664000567000056710000000132512701407102032200 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis.inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='oauth1') keystone-9.0.0/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py0000664000567000056710000000130612701407102034613 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='oauth1') keystone-9.0.0/keystone/contrib/oauth1/migrate_repo/__init__.py0000664000567000056710000000000012701407102025772 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/oauth1/migrate_repo/migrate.cfg0000664000567000056710000000231212701407102026002 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=oauth1 # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] # When creating new change scripts, Migrate will stamp the new script with # a version number. By default this is latest_version + 1. You can set this # to 'true' to tell Migrate to use the UTC timestamp instead. 
use_timestamp_numbering=False keystone-9.0.0/keystone/contrib/oauth1/__init__.py0000664000567000056710000000000012701407102023315 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/oauth1/routers.py0000664000567000056710000000231212701407102023271 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_log import versionutils from keystone.common import wsgi from keystone.i18n import _ LOG = log.getLogger(__name__) class OAuth1Extension(wsgi.Middleware): def __init__(self, *args, **kwargs): super(OAuth1Extension, self).__init__(*args, **kwargs) msg = _("Remove oauth1_extension from the paste pipeline, the " "oauth1 extension is now always available. 
Update the " "[pipeline:api_v3] section in keystone-paste.ini accordingly, " "as it will be removed in the O release.") versionutils.report_deprecated_feature(LOG, msg) keystone-9.0.0/keystone/contrib/endpoint_filter/0000775000567000056710000000000012701407246023213 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_filter/backends/0000775000567000056710000000000012701407246024765 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_filter/backends/__init__.py0000664000567000056710000000000012701407102027053 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_filter/backends/catalog_sql.py0000664000567000056710000000542012701407102027620 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.catalog.backends import sql from keystone.catalog import core as catalog_core from keystone.common import dependency CONF = cfg.CONF @dependency.requires('catalog_api') class EndpointFilterCatalog(sql.Catalog): def get_v3_catalog(self, user_id, project_id): substitutions = dict(CONF.items()) substitutions.update({ 'tenant_id': project_id, 'project_id': project_id, 'user_id': user_id, }) services = {} dict_of_endpoint_refs = (self.catalog_api. 
list_endpoints_for_project(project_id)) if (not dict_of_endpoint_refs and CONF.endpoint_filter.return_all_endpoints_if_no_filter): return super(EndpointFilterCatalog, self).get_v3_catalog( user_id, project_id) for endpoint_id, endpoint in dict_of_endpoint_refs.items(): if not endpoint['enabled']: # Skip disabled endpoints. continue service_id = endpoint['service_id'] services.setdefault( service_id, self.get_service(service_id)) service = services[service_id] del endpoint['service_id'] del endpoint['enabled'] del endpoint['legacy_endpoint_id'] # Include deprecated region for backwards compatibility endpoint['region'] = endpoint['region_id'] endpoint['url'] = catalog_core.format_url( endpoint['url'], substitutions) # populate filtered endpoints if 'endpoints' in services[service_id]: service['endpoints'].append(endpoint) else: service['endpoints'] = [endpoint] # format catalog catalog = [] for service_id, service in services.items(): formatted_service = {} formatted_service['id'] = service['id'] formatted_service['type'] = service['type'] formatted_service['name'] = service['name'] formatted_service['endpoints'] = service['endpoints'] catalog.append(formatted_service) return catalog keystone-9.0.0/keystone/contrib/endpoint_filter/backends/sql.py0000664000567000056710000000200512701407102026122 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import versionutils from keystone.catalog.backends import sql _OLD = 'keystone.contrib.endpoint_filter.backends.sql.EndpointFilter' _NEW = 'sql' class EndpointFilter(sql.Catalog): @versionutils.deprecated( as_of=versionutils.deprecated.MITAKA, in_favor_of=_NEW, what=_OLD, remove_in=2) def __init__(self, *args, **kwargs): super(EndpointFilter, self).__init__(*args, **kwargs) keystone-9.0.0/keystone/contrib/endpoint_filter/migrate_repo/0000775000567000056710000000000012701407246025670 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_filter/migrate_repo/versions/0000775000567000056710000000000012701407246027540 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_filter/migrate_repo/versions/__init__.py0000664000567000056710000000000012701407102031626 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000keystone-9.0.0/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.pykeystone-9.0.0/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_tab0000664000567000056710000000131712701407102035355 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='endpoint_filter') keystone-9.0.0/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py0000664000567000056710000000132212701407102034507 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='endpoint_filter') keystone-9.0.0/keystone/contrib/endpoint_filter/migrate_repo/__init__.py0000664000567000056710000000000012701407102027756 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_filter/migrate_repo/migrate.cfg0000664000567000056710000000232312701407102027770 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=endpoint_filter # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. 
version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] # When creating new change scripts, Migrate will stamp the new script with # a version number. By default this is latest_version + 1. You can set this # to 'true' to tell Migrate to use the UTC timestamp instead. use_timestamp_numbering=False keystone-9.0.0/keystone/contrib/endpoint_filter/__init__.py0000664000567000056710000000000012701407102025301 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_filter/routers.py0000664000567000056710000000235312701407102025262 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from oslo_log import versionutils from keystone.common import wsgi from keystone.i18n import _ LOG = log.getLogger(__name__) class EndpointFilterExtension(wsgi.Middleware): def __init__(self, *args, **kwargs): super(EndpointFilterExtension, self).__init__(*args, **kwargs) msg = _("Remove endpoint_filter_extension from the paste pipeline, " "the endpoint filter extension is now always available. " "Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly as it will be removed in the O release.") versionutils.report_deprecated_feature(LOG, msg) keystone-9.0.0/keystone/contrib/revoke/0000775000567000056710000000000012701407246021321 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/revoke/backends/0000775000567000056710000000000012701407246023073 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/revoke/backends/__init__.py0000664000567000056710000000000012701407102025161 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/revoke/backends/sql.py0000664000567000056710000000170212701407102024233 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import versionutils from keystone.revoke.backends import sql _OLD = "keystone.contrib.revoke.backends.sql.Revoke" _NEW = "sql" class Revoke(sql.Revoke): @versionutils.deprecated(versionutils.deprecated.MITAKA, in_favor_of=_NEW, what=_OLD) def __init__(self, *args, **kwargs): super(Revoke, self).__init__(*args, **kwargs) keystone-9.0.0/keystone/contrib/revoke/migrate_repo/0000775000567000056710000000000012701407246023776 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/revoke/migrate_repo/versions/0000775000567000056710000000000012701407246025646 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000keystone-9.0.0/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.pykeystone-9.0.0/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_ta0000664000567000056710000000123612701407102035262 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='revoke') keystone-9.0.0/keystone/contrib/revoke/migrate_repo/versions/__init__.py0000664000567000056710000000000012701407102027734 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py0000664000567000056710000000123612701407102031233 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='revoke') keystone-9.0.0/keystone/contrib/revoke/migrate_repo/__init__.py0000664000567000056710000000000012701407102026064 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/revoke/migrate_repo/migrate.cfg0000664000567000056710000000231212701407102026074 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=revoke # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. 
version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] # When creating new change scripts, Migrate will stamp the new script with # a version number. By default this is latest_version + 1. You can set this # to 'true' to tell Migrate to use the UTC timestamp instead. use_timestamp_numbering=False keystone-9.0.0/keystone/contrib/revoke/__init__.py0000664000567000056710000000000012701407102023407 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/revoke/routers.py0000664000567000056710000000224212701407102023365 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from oslo_log import versionutils from keystone.common import wsgi from keystone.i18n import _ LOG = log.getLogger(__name__) class RevokeExtension(wsgi.Middleware): def __init__(self, *args, **kwargs): super(RevokeExtension, self).__init__(*args, **kwargs) msg = _("Remove revoke_extension from the paste pipeline, the " "revoke extension is now always available. Update the " "[pipeline:api_v3] section in keystone-paste.ini accordingly, " "as it will be removed in the O release.") versionutils.report_deprecated_feature(LOG, msg) keystone-9.0.0/keystone/contrib/user_crud/0000775000567000056710000000000012701407246022021 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/user_crud/__init__.py0000664000567000056710000000117012701407102024120 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.contrib.user_crud.core import * # noqa keystone-9.0.0/keystone/contrib/user_crud/core.py0000664000567000056710000000227612701407102023321 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_log import versionutils from keystone.common import wsgi from keystone.i18n import _ LOG = log.getLogger(__name__) class CrudExtension(wsgi.Middleware): def __init__(self, application): super(CrudExtension, self).__init__(application) msg = _("Remove user_crud_extension from the paste pipeline, the " "user_crud extension is now always available. Update" "the [pipeline:public_api] section in keystone-paste.ini " "accordingly, as it will be removed in the O release.") versionutils.report_deprecated_feature(LOG, msg) keystone-9.0.0/keystone/contrib/endpoint_policy/0000775000567000056710000000000012701407246023225 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_policy/backends/0000775000567000056710000000000012701407246024777 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_policy/backends/__init__.py0000664000567000056710000000000012701407102027065 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_policy/backends/sql.py0000664000567000056710000000211712701407102026140 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from keystone.endpoint_policy.backends import sql _OLD = 'keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy' _NEW = 'keystone.endpoint_policy.backends.sql.EndpointPolicy' class EndpointPolicy(sql.EndpointPolicy): @versionutils.deprecated(versionutils.deprecated.LIBERTY, in_favor_of=_NEW, remove_in=1, what=_OLD) def __init__(self, *args, **kwargs): super(EndpointPolicy, self).__init__(*args, **kwargs) keystone-9.0.0/keystone/contrib/endpoint_policy/migrate_repo/0000775000567000056710000000000012701407246025702 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_policy/migrate_repo/versions/0000775000567000056710000000000012701407246027552 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000keystone-9.0.0/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.pykeystone-9.0.0/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.0000664000567000056710000000130412701407102035276 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception def upgrade(migrate_engine): raise exception.MigrationMovedFailure(extension='endpoint_policy') keystone-9.0.0/keystone/contrib/endpoint_policy/migrate_repo/versions/__init__.py0000664000567000056710000000000012701407102031640 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_policy/migrate_repo/__init__.py0000664000567000056710000000000012701407102027770 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_policy/migrate_repo/migrate.cfg0000664000567000056710000000232312701407102030002 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=endpoint_policy # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] # When creating new change scripts, Migrate will stamp the new script with # a version number. By default this is latest_version + 1. You can set this # to 'true' to tell Migrate to use the UTC timestamp instead. 
use_timestamp_numbering=False keystone-9.0.0/keystone/contrib/endpoint_policy/__init__.py0000664000567000056710000000000012701407102025313 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/endpoint_policy/routers.py0000664000567000056710000000210512701407102025267 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from keystone.common import wsgi _OLD = 'keystone.contrib.endpoint_policy.routers.EndpointPolicyExtension' _NEW = 'keystone.endpoint_policy.routers.Routers' class EndpointPolicyExtension(wsgi.Middleware): @versionutils.deprecated(versionutils.deprecated.LIBERTY, in_favor_of=_NEW, remove_in=1, what=_OLD) def __init__(self, *args, **kwargs): super(EndpointPolicyExtension, self).__init__(*args, **kwargs) keystone-9.0.0/keystone/contrib/simple_cert/0000775000567000056710000000000012701407246022334 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/simple_cert/__init__.py0000664000567000056710000000115712701407102024440 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.contrib.simple_cert.routers import SimpleCertExtension # noqa keystone-9.0.0/keystone/contrib/simple_cert/routers.py0000664000567000056710000000241312701407102024400 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_log import versionutils from keystone.common import wsgi from keystone.i18n import _ LOG = log.getLogger(__name__) class SimpleCertExtension(wsgi.Middleware): def __init__(self, application): super(SimpleCertExtension, self).__init__(application) msg = _("Remove simple_cert from the paste pipeline, the " "PKI and PKIz token providers are now deprecated and " "simple_cert was only used insupport of these token " "providers. 
Update the [pipeline:api_v3] section in " "keystone-paste.ini accordingly, as it will be removed in the " "O release.") versionutils.report_deprecated_feature(LOG, msg) keystone-9.0.0/keystone/contrib/admin_crud/0000775000567000056710000000000012701407246022133 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/contrib/admin_crud/__init__.py0000664000567000056710000000120112701407102024225 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.contrib.admin_crud.core import * # noqa keystone-9.0.0/keystone/contrib/admin_crud/core.py0000664000567000056710000000230712701407102023426 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from oslo_log import versionutils from keystone.common import wsgi from keystone.i18n import _ LOG = log.getLogger(__name__) class CrudExtension(wsgi.Middleware): def __init__(self, application): super(CrudExtension, self).__init__(application) msg = _("Remove admin_crud_extension from the paste pipeline, the " "admin_crud extension is now always available. Update" "the [pipeline:admin_api] section in keystone-paste.ini " "accordingly, as it will be removed in the O release.") versionutils.report_deprecated_feature(LOG, msg) keystone-9.0.0/keystone/i18n.py0000664000567000056710000000220412701407102017504 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See http://docs.openstack.org/developer/oslo.i18n/usage.html . """ import oslo_i18n _translators = oslo_i18n.TranslatorFactory(domain='keystone') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. 
_LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical keystone-9.0.0/keystone/locale/0000775000567000056710000000000012701407246017625 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/ru/0000775000567000056710000000000012701407246020253 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/ru/LC_MESSAGES/0000775000567000056710000000000012701407246022040 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000201212701407102026572 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Russian\n" #, python-format msgid "Unable to open template file %s" msgstr "Не удается открыть файл шаблона %s" keystone-9.0.0/keystone/locale/ru/LC_MESSAGES/keystone.po0000664000567000056710000021241212701407105024235 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # kogamatranslator49 , 2015 # sher , 2013 # sher , 2013 # Lucas Palm , 2015. 
#zanata # OpenStack Infra , 2015. #zanata # Grigory Mokhin , 2016. #zanata # Lucas Palm , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev4\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-18 19:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-18 10:16+0000\n" "Last-Translator: Grigory Mokhin \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Russian\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "%(driver)s is not supported driver version" msgstr "Версия драйвера %(driver)s не поддерживается" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "" "Имя %(entity)s не может содержать следующие зарезервированные символы: " "%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s не является допустимым событием уведомления, требуется одно из " "значений: %(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s не является надежным хостом сводных панелей" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s не обеспечивает перенос баз данных. Путь к хранилищу миграции " "%(path)s не существует или не является каталогом." 
#, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s" msgstr "%(prior_role_id)s не подразумевает %(implied_role_id)s" #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." msgstr "%(property_name)s не может быть короче %(min_length)s символов." #, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "%(property_name)s не принадлежит к типу %(display_expected_type)s" #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." msgstr "%(property_name)s не должен быть длинее %(max_length)s символов." #, python-format msgid "%(role_id)s cannot be an implied roles" msgstr "%(role_id)s не может быть подразумеваемой ролью" #, python-format msgid "%s cannot be empty." msgstr "%s не может быть пуст." #, python-format msgid "%s extension does not exist." msgstr "Расширение %s не существует" #, python-format msgid "%s field is required and cannot be empty" msgstr "Поле %s является обязательным и не может быть пустым" #, python-format msgid "%s field(s) cannot be empty" msgstr "Поле %s не может быть пустым" #, python-format msgid "" "%s for the LDAP identity backend has been deprecated in the Mitaka release " "in favor of read-only identity LDAP access. It will be removed in the \"O\" " "release." msgstr "" "%s для системы идентификации LDAP устарело Mitaka, вместо него используется " "идентификация LDAP с доступом только для чтения. Эта функция будет удалена в " "выпуске \"O\"." msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(Выключите режим insecure_debug, чтобы не показывать эти подробности.)" msgid "--all option cannot be mixed with other options" msgstr "опцию --all нельзя указывать вместе с другими опциями" msgid "A project-scoped token is required to produce a service catalog." msgstr "Для создания каталога службы необходим маркер уровня проекта." 
msgid "Access token is expired" msgstr "Срок действия ключа доступа истек" msgid "Access token not found" msgstr "Ключ доступа не найден" msgid "Additional authentications steps required." msgstr "Требуются дополнительные действия для идентификации." msgid "An unexpected error occurred when retrieving domain configs" msgstr "Возникла непредвиденная ошибка при получении конфигураций доменов" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "При попытке сохранить %s произошла непредвиденная ошибка" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "Из-за непредвиденной ошибки ваш запрос не был выполнен сервером." #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "" "Из-за непредвиденной ошибки ваш запрос не был выполнен сервером: " "%(exception)s" msgid "An unhandled exception has occurred: Could not find metadata." msgstr "" "Возникла необработанная исключительная ситуация: не удалось найти метаданные." msgid "At least one option must be provided" msgstr "Необходимо указать хотя бы одну опцию" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "Должен быть указан хотя бы один параметр. Укажите --all или --domain-name" msgid "At least one role should be specified." msgstr "Необходимо указать по крайней мере одну роль." #, python-format msgid "" "Attempted automatic driver selection for assignment based upon " "[identity]\\driver option failed since driver %s is not found. Set " "[assignment]/driver to a valid driver in keystone config." msgstr "" "Не удалось автоматически выбрать драйвер на основе опции [identity]\\driver, " "так как драйвер %s не найден. Укажите требуемый драйвер в [assignment]/" "driver в конфигурации keystone." msgid "Attempted to authenticate with an unsupported method." msgstr "Попытка идентификации с использованием неподдерживаемого метода." 
msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "" "Попытка использовать маркер OS-FEDERATION со службой идентификации версии 2. " "Следует использовать идентификацию версии 3" msgid "Authentication plugin error." msgstr "Ошибка модуля идентификации." #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "" "Базовая система `%(backend)s` не является допустимой базовой системой в кэше " "памяти. Допустимые базовые системы: %(backend_list)s" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "" "Предоставить права доступа маркеру запроса с маркером, выданным посредством " "делегирования, невозможно." #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "Невозможно изменить %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "Невозможно изменить ИД домена" msgid "Cannot change user ID" msgstr "Невозможно изменить ИД пользователя" msgid "Cannot change user name" msgstr "Невозможно изменить имя пользователя" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "Не удается создать конечную точку с помощью недопустимого URL: %(url)s" #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "Не удается создать проект с родительским объектом: %(project_id)s" #, python-format msgid "" "Cannot create project, since it specifies its owner as domain %(domain_id)s, " "but specifies a parent in a different domain (%(parent_domain_id)s)." msgstr "" "Не удается создать проект, так как его владелец указан как домен " "%(domain_id)s, но его родительский объект задан в другом домене " "(%(parent_domain_id)s)." #, python-format msgid "" "Cannot create project, since its parent (%(domain_id)s) is acting as a " "domain, but project's specified parent_id (%(parent_id)s) does not match " "this domain_id." 
msgstr "" "Не удается создать проект, так как его родительский элемент (%(domain_id)s) " "работает в качестве домена, но parent_id (%(parent_id)s), указанный для " "проекта, не соответствует данному domain_id." msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "Невозможно удалить включенный домен, сначала выключите его." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Невозможно удалить проект %(project_id)s, так как его поддерево содержит " "включенные проекты" #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "Невозможно удалить проект %s, так как он не является конечным объектом в " "структуре. Используйте каскадную опцию для удаления всего поддерева." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Нельзя отключить проект %(project_id)s, так как его поддерево содержит " "включенные проекты" #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "" "Не удается включить проект %s, так как у него отключены родительские объекты" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "Не удается показать список присвоений, полученных из групп и отфильтрованных " "по ИД пользователя." msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "Показать список маркеров запросов с маркером, выданным посредством " "делегирования, невозможно." #, python-format msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s" msgstr "Не удалось открыть сертификат %(cert_file)s. 
Причина: %(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Удалить роль, которая не была предоставлена, нельзя: %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "Невозможно отсечь вызов драйвера без списка подсказок в качестве первого " "параметра после самого себя " msgid "Cannot update domain_id of a project that has children." msgstr "" "Не разрешено обновлять domain_id для проекта, у которого есть дочерние " "объекты." msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "Нельзя использовать параметры запроса parents_as_list и parents_as_ids " "одновременно." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "Нельзя использовать параметры запроса subtree_as_list и subtree_as_ids " "одновременно." msgid "Cascade update is only allowed for enabled attribute." msgstr "Каскадное обновление разрешено только для включенных атрибутов." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "Сочетание действующего фильтра и фильтра группы всегда дает пустой список." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "Сочетание действующего фильтра, фильтра домена и унаследованного фильтра " "всегда дает пустой список." 
#, python-format msgid "Config API entity at /domains/%s/config" msgstr "Настроить элемент API в /domains/%s/config" #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "При попытке сохранить %(type)s возник конфликт - %(details)s" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "Указаны конфликтующие ИД регионов: \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "Приемник не найден" #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target %(target)s" msgstr "" "Изменить постоянный атрибут '%(attributes)s' в цели %(target)s невозможно" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "Не удалось определить ИД поставщика идентификации. Опция конфигурации " "%(issuer_attribute)s не найдена в среде запроса." #, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "" "Не найден пользователь/группа %(group_or_option)s в конфигурации домена " "%(domain_id)s" #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "Не найдена группа конечных точек: %(endpoint_group_id)s" msgid "Could not find Identity Provider identifier in environment" msgstr "Не удалось найти идентификатор поставщика идентификаторов в среде" #, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "Поставщик идентификаторов %(idp_id)s не найден" #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "Не удалось найти поставщик служб %(sp_id)s" #, python-format msgid "Could not find credential: %(credential_id)s" msgstr "Идентификационные данные %(credential_id)s не найдены" #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "Домен %(domain_id)s не найден" #, python-format msgid "Could not 
find endpoint: %(endpoint_id)s" msgstr "Конечная точка %(endpoint_id)s не найдена" #, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "" "Объединенный протокол %(protocol_id)s для поставщика идентификаторов " "%(idp_id)s не найден" #, python-format msgid "Could not find group: %(group_id)s" msgstr "Группа %(group_id)s не найдена" #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "Отображение %(mapping_id)s не найдено" msgid "Could not find policy association" msgstr "Не найдена связь стратегии" #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "Стратегия %(policy_id)s не найдена" #, python-format msgid "Could not find project: %(project_id)s" msgstr "Проект %(project_id)s не найден" #, python-format msgid "Could not find region: %(region_id)s" msgstr "Регион %(region_id)s не найден" #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" "Не найдено присвоение роли %(role_id)s, пользователь/группа: %(actor_id)s, " "проект/домен: %(target_id)s" #, python-format msgid "Could not find role: %(role_id)s" msgstr "Роль %(role_id)s не найдена" #, python-format msgid "Could not find service: %(service_id)s" msgstr "Служба %(service_id)s не найдена" #, python-format msgid "Could not find token: %(token_id)s" msgstr "Ключ %(token_id)s не найден" #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "Группа доверия %(trust_id)s не найдена" #, python-format msgid "Could not find user: %(user_id)s" msgstr "Пользователь %(user_id)s не найден" #, python-format msgid "Could not find version: %(version)s" msgstr "Версия %(version)s не найдена" #, python-format msgid "Could not find: %(target)s" msgstr "%(target)s не найдена" msgid "" "Could not map any federated user properties to identity values. 
Check debug " "logs or the mapping used for additional details." msgstr "" "Не удается связать объединенные свойства пользователя с идентификаторами. " "Дополнительные сведения о связывании приведены в протоколе отладки." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "Не удалось привязать пользователя во время настройки временного " "идентификатора пользователя. Правила привязка должны указывать имя/ИД " "пользователя, либо должна быть задана переменная среды REMOTE_USER." msgid "Could not validate the access token" msgstr "Не удалось проверить ключ доступа" msgid "Credential belongs to another user" msgstr "Разрешение принадлежит другому пользователю" msgid "Credential signature mismatch" msgstr "Несовпадение подписи идентификационных данных" #, python-format msgid "" "Direct import of auth plugin %(name)r is deprecated as of Liberty in favor " "of its entrypoint from %(namespace)r and may be removed in N." msgstr "" "Прямой импорт модуля идентификации %(name)r устарел в Liberty и может быть " "удален в выпуске N. Вместо этого используется его точка входа из " "%(namespace)r." #, python-format msgid "" "Direct import of driver %(name)r is deprecated as of Liberty in favor of its " "entrypoint from %(namespace)r and may be removed in N." msgstr "" "Прямой импорт драйвера %(name)r устарел в Liberty и может быть удален в " "выпуске N. Вместо этого используется его точка входа из %(namespace)r." msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "Отключение сущности, при котором атрибут 'enable' в конфигурации " "игнорируется." 
#, python-format msgid "Domain (%s)" msgstr "Домен (%s)" #, python-format msgid "Domain cannot be named %s" msgstr "Домену нельзя присвоить имя %s" #, python-format msgid "Domain cannot have ID %s" msgstr "Домен не может иметь идентификатор %s" #, python-format msgid "Domain is disabled: %s" msgstr "Домен отключен: %s" msgid "Domain name cannot contain reserved characters." msgstr "Имя домена не может содержать зарезервированные символы." msgid "Domain scoped token is not supported" msgstr "Маркер, область которого - домен, не поддерживается" msgid "Domain specific roles are not supported in the V8 role driver" msgstr "Особые роли домена не поддерживаются в драйвере ролей V8" #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "У домена %(domain)s уже определена конфигурация - файл пропущен: %(file)s." msgid "Duplicate Entry" msgstr "Дубликат записи" #, python-format msgid "Duplicate ID, %s." msgstr "Повторяющийся идентификатор, %s." #, python-format msgid "Duplicate entry: %s" msgstr "Повторяющаяся запись: %s" #, python-format msgid "Duplicate name, %s." msgstr "Повторяющееся имя, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "Повторяющийся удаленный ИД: %s" msgid "EC2 access key not found." msgstr "Ключ доступа EC2 не найден." msgid "EC2 signature not supplied." msgstr "Не указана подпись EC2." msgid "" "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set." msgstr "" "Необходимо указать аргумент --bootstrap-password или OS_BOOTSTRAP_PASSWORD." 
msgid "Enabled field must be a boolean" msgstr "Активное поле должно быть булевским значением" msgid "Enabled field should be a boolean" msgstr "Активное поле должно быть булевским значением" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Конечная точка %(endpoint_id)s не найдена в проекте %(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "Не найдена связь проекта группы конечных точек" msgid "Ensure configuration option idp_entity_id is set." msgstr "Убедитесь, что указан параметр конфигурации idp_entity_id." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "Убедитесь, что указан параметр конфигурации idp_sso_endpoint." #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Ошибка анализа файла конфигурации для домена %(domain)s, файл: %(file)s." #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Ошибка при открытии файла %(path)s: %(err)s" #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "Ошибка при анализе строки: '%(line)s': %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Ошибка при анализе правил %(path)s: %(err)s" #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "Ошибка чтения файла метаданных: %(reason)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Превышено число попыток регистрации домена %(domain)s для использования " "драйвера SQL. Последний домен, для которого это было сделано - " "%(last_domain)s. Больше попыток не будет" #, python-format msgid "Expected dict or list: %s" msgstr "Ожидается dict или list: %s" msgid "" "Expected signing certificates are not available on the server. Please check " "Keystone configuration." 
msgstr "" "Ожидаемые сертификаты подписания недоступны на сервере. Рекомендуется " "проверить конфигурацию Keystone." #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "Ожидается %(attribute)s в %(target)s - серверу не удалось удовлетворить " "запрос, поскольку его формат является неверным, либо запрос некорректен по " "другой причине. Предположительно, клиент находится в состоянии ошибки." #, python-format msgid "Failed to start the %(name)s server" msgstr "Не удалось запустить сервер %(name)s" msgid "Failed to validate token" msgstr "Проверить маркер не удалось" msgid "Federation token is expired" msgstr "Срок действия ключа объединения истек" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "Полю \"remaining_uses\" присвоено значение %(value)s, хотя поле не может " "быть задано для изменения делегирования группы доверия" msgid "Found invalid token: scoped to both project and domain." msgstr "" "Обнаружен недопустимый маркер: он относится и к уровню проекта, и к уровню " "домена." #, python-format msgid "Group %s not found in config" msgstr "Группа %s не найдена в конфигурации" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "Группа %(group)s не поддерживается для определенных конфигураций домена" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "Группа %(group_id)s, возвращенная преобразованием %(mapping_id)s, не найдена " "в на базовом сервере." 
#, python-format msgid "" "Group membership across backend boundaries is not allowed, group in question " "is %(group_id)s, user is %(user_id)s" msgstr "" "Членство в группе не может распространяться через границы базовых систем, " "группа под вопросом - %(group_id)s, пользователь - %(user_id)s" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "Атрибут ИД %(id_attr)s не найден в объекте LDAP %(dn)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "Поставщик идентификаторов %(idp)s отключен" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" "Входящий идентификатор поставщика идентификаторов не включен в принятые " "идентификаторы." msgid "Invalid EC2 signature." msgstr "Недопустимая подпись EC2." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "Недопустимая опция certs TLS LDAP: %(option)s. Выберите одно из следующих " "значений: %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Недопустимая опция TLS_AVAIL LDAP: %s. TLS недоступен" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "Недопустимая опция deref LDAP: %(option)s. Выберите одно из следующих " "значений: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "" "Недопустимая область LDAP: %(scope)s. 
Выберите одно из следующих значений: " "%(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Недопустимое сочетание TLS/LDAPS" #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "Недопустимый тип данных в информации контроля: %(data)s (%(type)s)" msgid "Invalid blob in credential" msgstr "Недопустимый большой двоичный объект в разрешении" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Обнаружено недопустимое имя домена %(domain)s в файле конфигурации %(file)s " "- файл пропущен." #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "Недопустимая конфигурация для домена: %(reason)s" #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "Недопустимый ввод для поля '%(path)s'. Значение - '%(value)s'." msgid "Invalid limit value" msgstr "Недопустимое значение ограничения" #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service or " "Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s" msgstr "" "Недопустимое смешение сущностей для связывания стратегии. Только Конечная " "точка, Служба и Регион+Служба разрешены. В запросе было: Конечная точка " "%(endpoint_id)s, Служба %(service_id)s, Регион %(region_id)s" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Недопустимое правило: %(identity_value)s. Ключевые слова 'groups' и 'domain' " "должны быть указаны." 
msgid "Invalid signature" msgstr "Недопустимая подпись" msgid "Invalid user / password" msgstr "Недопустимый пользователь / пароль" msgid "Invalid username or TOTP passcode" msgstr "Недопустимое имя пользователя или пароль TOTP" msgid "Invalid username or password" msgstr "Недопустимое имя пользователя или пароль" #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." msgstr "Регион KVS %s уже настроен. Изменение конфигурации невозможно." #, python-format msgid "Key Value Store not configured: %s" msgstr "Хранилище значений ключей не настроено: %s" #, python-format msgid "LDAP %s create" msgstr "LDAP %s создание" #, python-format msgid "LDAP %s delete" msgstr "LDAP %s удаление" #, python-format msgid "LDAP %s update" msgstr "LDAP %s обновление" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "Длина ИД преобразуемого ресурса > 64 символов, то есть превышает максимально " "допустимую" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "Локальный раздел в преобразовании %(mapping_id)s указывает на удаленное " "совпадение, которое не существует (например, {0} в локальном разделе)." #, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "Наступил тайм-аут блокировки для ключа, %(target)s" #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "" "Блокировка должна соответствовать целевому ключу: %(lock)s != %(target)s" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "Неверный формат URL конечной точки (%(endpoint)s), подробную информацию см. " "в протоколе ОШИБОК." msgid "Marker could not be found" msgstr "Не удалось найти маркер" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "Для ветви %s достигнута максимальная глубина иерархии." 
#, python-format msgid "Maximum lock attempts on %s occurred." msgstr "Выполнено максимальное число попыток блокировки в %s." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "Элемент %(member)s уже является участником группы %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Вызов метода невозможен: %s" msgid "Missing entity ID from environment" msgstr "В среде отсутствует ИД сущности" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "Изменение параметра \"redelegation_count\" во время изменения делегирования " "запрещено. Возможен пропуск этого параметра." msgid "Multiple domains are not supported" msgstr "Множественные домены не поддерживаются" msgid "Must be called within an active lock context." msgstr "Требуется вызов в контексте активной блокировки." msgid "Must specify either domain or project" msgstr "Необходимо указать домен или проект" msgid "Name field is required and cannot be empty" msgstr "Поле имени является обязательным и не может быть пустым" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "Не указаны ни ИД домена проекта, ни имя домена проекта." msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, if " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "Не найдены заголовки предоставления доступа - вызовы, связанные с OAuth, " "невозможны при выполнении под управлением HTTPd или Apache. Убедитесь, что " "параметру WSGIPassAuthorization присвоено значение On." msgid "No authenticated user" msgstr "Нет идентифицированного пользователя" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "Не найдены ключи шифрования. Выполните команду keystone-manage fernet_setup, " "чтобы создать ключ." 
msgid "No options specified" msgstr "Параметры не указаны" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "С конечной точкой %(endpoint_id)s не связано ни одной стратегии." #, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "Вариантов использования группы доверия %(trust_id)s не осталось" msgid "No token in the request" msgstr "В запросе отсутствует маркер" msgid "Non-default domain is not supported" msgstr "Домен, отличный от применяемого по умолчанию, не поддерживается" msgid "One of the trust agents is disabled or deleted" msgstr "Один из доверенных агентов отключен или удален" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "Обнаружен параметр %(option)s без указанной группы во время проверки запроса " "на настройку домена" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "Параметр %(option)s в группе %(group)s не поддерживается для определенных " "конфигураций домена" #, python-format msgid "Project (%s)" msgstr "Проект (%s)" #, python-format msgid "Project ID not found: %(t_id)s" msgstr "Не найден ИД проекта: %(t_id)s" msgid "Project field is required and cannot be empty." msgstr "Поле проекта является обязательным и не может быть пустым." #, python-format msgid "Project is disabled: %s" msgstr "Проект отключен: %s" msgid "Project name cannot contain reserved characters." msgstr "Имя проекта не может содержать зарезервированные символы." 
msgid "Query string is not UTF-8 encoded" msgstr "Строка запроса указана в кодировке, отличной от UTF-8" #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "Чтение значения по умолчанию для параметра %(option)s в группе %(group)s не " "поддерживается" msgid "Redelegation allowed for delegated by trust only" msgstr "Изменение делегирования разрешено только для доверенного пользователя" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "Оставшаяся глубина изменения делегирования %(redelegation_depth)d выходит за " "пределы разрешенного диапазона [0..%(max_count)d]" msgid "" "Remove admin_crud_extension from the paste pipeline, the admin_crud " "extension is now always available. Updatethe [pipeline:admin_api] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "Удалите admin_crud_extension из конвейера вставки, расширение admin_crud " "теперь доступно всегда. Обновите раздел [pipeline:admin_api] в файле " "keystone-paste.ini соответственно, так как он будет удален в выпуске O." msgid "" "Remove endpoint_filter_extension from the paste pipeline, the endpoint " "filter extension is now always available. Update the [pipeline:api_v3] " "section in keystone-paste.ini accordingly as it will be removed in the O " "release." msgstr "" "Удалите endpoint_filter_extension из конвейера вставки, расширение фильтра " "конечной точки теперь доступно всегда. Обновите раздел [pipeline:api_v3] в " "файле keystone-paste.ini соответственно, так как он будет удален в выпуске O." msgid "" "Remove federation_extension from the paste pipeline, the federation " "extension is now always available. Update the [pipeline:api_v3] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." 
msgstr "" "Удалите federation_filter_extension из конвейера вставки, расширение " "объединения теперь доступно всегда. Обновите раздел [pipeline:api_v3] в " "файле keystone-paste.ini соответственно, так как он будет удален в выпуске O." msgid "" "Remove oauth1_extension from the paste pipeline, the oauth1 extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Удалите oauth1_filter_extension из конвейера вставки, расширение oauth1 " "теперь доступно всегда. Обновите раздел [pipeline:api_v3] в файле keystone-" "paste.ini соответственно, так как он будет удален в выпуске O." msgid "" "Remove revoke_extension from the paste pipeline, the revoke extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Удалите revoke_filter_extension из конвейера вставки, расширение отзыва " "теперь доступно всегда. Обновите раздел [pipeline:api_v3] в файле keystone-" "paste.ini соответственно, так как он будет удален в выпуске O." msgid "" "Remove simple_cert from the paste pipeline, the PKI and PKIz token providers " "are now deprecated and simple_cert was only used insupport of these token " "providers. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Удалите simple_cert из конвейера вставки, теперь поставщики ключей PKI и " "PKIz устарели, а simple_cert использовался только для поддержки этих " "поставщиков. Обновите раздел [pipeline:api_v3] в файле keystone-paste.ini " "соответственно, так как он будет удален в выпуске O." msgid "" "Remove user_crud_extension from the paste pipeline, the user_crud extension " "is now always available. Updatethe [pipeline:public_api] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." 
msgstr "" "Удалите user_crud_extension из конвейера вставки, расширение user_crud " "теперь доступно всегда. Обновите раздел [pipeline:public_api] в файле " "keystone-paste.ini соответственно, так как он будет удален в выпуске O." msgid "Request Token does not have an authorizing user id" msgstr "" "Маркер запроса не содержит ИД пользователя для предоставления прав доступа" #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. The " "server could not comply with the request because the attribute size is " "invalid (too large). The client is assumed to be in error." msgstr "" "Атрибут запроса %(attribute)s не может быть больше %(size)i. Серверу не " "удалось удовлетворить запрос, поскольку размер атрибута является " "недопустимым (слишком большой). Предположительно, клиент находится в " "состоянии ошибки." msgid "Request must have an origin query parameter" msgstr "Запрос должен содержать параметр origin" msgid "Request token is expired" msgstr "Срок действия маркера запроса истек" msgid "Request token not found" msgstr "Маркер запроса не найден" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "Запрошенное время истечения срока действия превышает значение, которое может " "указать доверенный пользователь" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "Запрошенная глубина изменения делегирования %(requested_count)d превышает " "разрешенную %(max_count)d" msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of running " "in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will " "be removed in the \"M\"-Release." msgstr "" "Выполнение Keystone через библиотеку eventlet устарело начиная с выпуска " "Kilo. Следует выполнять на сервере WSGI (например, mod_wsgi). Поддержка " "keystone в библиотеке eventlet будет убрана в выпуске \"M\"." 
msgid "Scoping to both domain and project is not allowed" msgstr "Назначать и домен, и проект в качестве области нельзя" msgid "Scoping to both domain and trust is not allowed" msgstr "Назначать и домен, и группу доверия в качестве области нельзя" msgid "Scoping to both project and trust is not allowed" msgstr "Назначать и проект, и группу доверия в качестве области нельзя" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "Поставщик службы %(sp)s отключен" msgid "Some of requested roles are not in redelegated trust" msgstr "" "Некоторые из запрошенных ролей не относятся к доверенному пользователю с " "измененными полномочиями" msgid "Specify a domain or project, not both" msgstr "Укажите домен или проект, но не то и другое" msgid "Specify a user or group, not both" msgstr "Укажите пользователя или группу, но не то и другое" msgid "Specify one of domain or project" msgstr "Укажите один домен или проект" msgid "Specify one of user or group" msgstr "Укажите одного пользователя или группу" #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "Превышена длина строки. Длина строки '%(string)s' превышает ограничение " "столбца %(type)s(CHAR(%(length)d))." msgid "Tenant name cannot contain reserved characters." msgstr "Имя арендатора не может содержать зарезервированные символы." #, python-format msgid "" "The %s extension has been moved into keystone core and as such its " "migrations are maintained by the main keystone database control. Use the " "command: keystone-manage db_sync" msgstr "" "Расширение %s было перемещено в ядро keystone, и его перенос поддерживается " "основной системой управления базы данных keystone. Используйте команду: " "keystone-manage db_sync" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. 
The client " "is assumed to be in error." msgstr "" "Значение параметра 'expires_at' не должно быть меньше настоящего времени. " "Серверу не удалось исполнить запрос, так как он поврежден или неправильно " "сформирован. Предположительно, клиент находится в состоянии ошибки." msgid "The --all option cannot be used with the --domain-name option" msgstr "Параметр --all нельзя указывать вместе с параметром --domain-name" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "Не удалось найти файл конфигурации Keystone %(config_file)s." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "В конфигурации для домена Keystone указано несколько драйверов SQL (допустим " "только один): %(source)s." msgid "The action you have requested has not been implemented." msgstr "Запрошенное действие не реализовано." msgid "The authenticated user should match the trustor." msgstr "Идентифицированный пользователь должен соответствовать доверителю." msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." msgstr "" "Запрошенные сертификаты недоступны. Вероятно, данный сервер не использует " "маркеры PKI, в противном случае, это является следствием ошибки в " "конфигурации." msgid "The configured token provider does not support bind authentication." msgstr "Настроенный модуль маркера не поддерживает идентификацию привязки." msgid "The creation of projects acting as domains is not allowed in v2." msgstr "Создание проектов, работающих в качестве доменов, не разрешено в v2." #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "Длина пароля не должна превышать %(size)i. 
Сервер не может выполнить запрос, " "поскольку пароль недопустим." msgid "The request you have made requires authentication." msgstr "Выданный запрос требует идентификации." msgid "The resource could not be found." msgstr "Ресурс не найден." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "В вызове revoke не должны быть одновременно указаны domain_id и project_id. " "Это ошибка в коде сервера Keystone. Текущий запрос прерван." msgid "The service you have requested is no longer available on this server." msgstr "Запрошенная служба более не доступна на данном сервере." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "Заданная родительская область %(parent_region_id)s создаст круговую " "структуру области." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "Значение группы %(group)s, указанное в конфигурации, должно быть словарем " "параметров" msgid "There should not be any non-oauth parameters" msgstr "Не допускаются параметры, отличные от oauth" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Это не распознанная версия полезной нагрузки Fernet: %s" #, python-format msgid "This is not a recognized Fernet token %s" msgstr "Это не маркер Fernet: %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "Метка времени указана в неожиданном формате. Сервер не может выполнить " "запрос, поскольку он либо искажен, либо неправилен. Клиент, как " "предполагается, является ошибочным." 
#, python-format msgid "" "To get a more detailed information on this error, re-run this command for " "the specific domain, i.e.: keystone-manage domain_config_upload --domain-" "name %s" msgstr "" "Для получения дополнительной информации об этой ошибке еще раз выполните эту " "команду для конкретного домена. Пример: keystone-manage domain_config_upload " "--domain-name %s" msgid "Token belongs to another user" msgstr "Маркер принадлежит другому пользователю" msgid "Token does not belong to specified tenant." msgstr "Маркер не принадлежит указанному арендатору." msgid "Token version is unrecognizable or unsupported." msgstr "Версия маркера не распознана либо не поддерживается." msgid "Trustee has no delegated roles." msgstr "У доверенного лица нет делегированных ролей." msgid "Trustor is disabled." msgstr "Доверитель отключен." #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Изменение группы %(group)s, чтобы группа должна была указываться только в " "конфигурации" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Изменение параметра %(option)s в группе %(group)s, однако переданная " "конфигурация содержит параметр %(option_other)s вместо него" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Изменение параметра %(option)s в группе %(group)s, чтобы параметр должен был " "указываться только в конфигурации" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "Нет доступа к базе данных Keystone. Убедитесь, что она настроена правильно." #, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." msgstr "Принять группу доверия %(trust_id)s и захватить блокировку невозможно." 
#, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Не удалось удалить регион %(region_id)s: регион или его дочерние регионы " "имеют связанные конечные точки." msgid "Unable to downgrade schema" msgstr "Не удается понизить версию схемы" #, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "" "Невозможно найти допустимые группы при использовании преобразования " "%(mapping_id)s" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "Не удалось найти каталог конфигурации домена: %s" #, python-format msgid "Unable to lookup user %s" msgstr "Найти пользователя %s невозможно" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Согласовать атрибут идентификатора, %(attribute)s, невозможно, поскольку он " "содержит конфликтующие значения %(new)s и %(old)s" #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed, or this is the result of misconfiguration. Reason " "%(reason)s" msgstr "" "Не удалось подписать утверждение SAML. Вероятно, на этом сервере не " "установлена программа xmlsec1 или это результат неправильной настройки. " "Причина: %(reason)s" msgid "Unable to sign token." msgstr "Подписать маркер невозможно." 
#, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Обнаружен непредвиденный тип назначения, %s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" "Непредвиденная комбинация атрибутов предоставления доступа - пользователь: " "%(user_id)s, группа: %(group_id)s, проект: %(project_id)s, домен: " "%(domain_id)s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Запрошено неожиданное состояние для ответа JSON Home, %s" msgid "Unknown Target" msgstr "Неизвестный целевой объект" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "В опции --domain-name указано неизвестное имя домена '%(name)s'" #, python-format msgid "Unknown token version %s" msgstr "Неизвестная версия маркера %s" #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "Незарегистрированная зависимость %(name)s для %(targets)s" msgid "Update of `domain_id` is not allowed." msgstr "Обновление `domain_id` не разрешено." msgid "Update of `is_domain` is not allowed." msgstr "Обновление `is_domain` не разрешено." msgid "Update of `parent_id` is not allowed." msgstr "Обновление `parent_id` не разрешено." msgid "Update of domain_id is only allowed for root projects." msgstr "Обновление domain_id разрешено только для корневых проектов." msgid "Update of domain_id of projects acting as domains is not allowed." msgstr "" "Не разрешено обновлять domain_id для проектов, работающих в качестве доменов." 
msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "Использовать локальный ключ проекта при создании утверждения SAML" msgid "" "Use of the identity driver config to automatically configure the same " "assignment driver has been deprecated, in the \"O\" release, the assignment " "driver will need to be expicitly configured if different than the default " "(SQL)." msgstr "" "Использование конфигурации драйвера идентификатора для автоматической " "настройки такого же драйвера присвоения устарело. В выпуске \"O\" драйвер " "присвоения должен будет настраиваться явным образом, если он не совпадает с " "драйвером по умолчанию (SQL)." #, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "У пользователя %(u_id)s нет доступа к арендатору %(t_id)s" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "У пользователя %(user_id)s нет доступа к домену %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "У пользователя %(user_id)s нет доступа к проекту %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "Пользователь %(user_id)s уже является участником группы %(group_id)s" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "Пользователь '%(user_id)s' не найден в группе '%(group_id)s'" msgid "User IDs do not match" msgstr "ИД пользователей не совпадают" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "Не удалось скомпоновать идентификацию пользователя, так как отсутствует ИД " "пользователя, имя пользователя с ИД домена либо имя пользователя с именем " "домена." 
#, python-format msgid "User is disabled: %s" msgstr "Пользователь отключен: %s" msgid "User is not a member of the requested project" msgstr "Пользователь не является участником запрошенного проекта" msgid "User is not a trustee." msgstr "Пользователь не является доверенным лицом." msgid "User not found" msgstr "Пользователь не найден" msgid "User not valid for tenant." msgstr "Недопустимый пользователь для арендатора." msgid "User roles not supported: tenant_id required" msgstr "Роли пользователей не поддерживаются, требуется tenant_id" #, python-format msgid "User type %s not supported" msgstr "Тип пользователя %s не поддерживается" msgid "You are not authorized to perform the requested action." msgstr "У вас нет прав на выполнение запрашиваемого действия." #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "У вас нет прав на выполнение запрошенного действия: %(action)s" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Попытка создания ресурса с помощью административного маркера. Так как этот " "маркер не принадлежит домену, необходимо явно указать домен, которому будет " "принадлежать ресурс." msgid "`key_mangler` functions must be callable." msgstr "Функции `key_mangler` должны быть доступны для вызова." 
msgid "`key_mangler` option must be a function reference" msgstr "Опция `key_mangler` должна быть ссылкой на функцию" msgid "any options" msgstr "любые параметры" msgid "auth_type is not Negotiate" msgstr "auth_type отличен от Negotiate" msgid "authorizing user does not have role required" msgstr "" "пользователю, предоставляющему права доступа, не присвоена требуемая роль" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "Нельзя создать проект в ветви, содержащей отключенный проект: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "Невозможно удалить включенный проект, работающий как домен. Сначала " "выключите проект %s." #, python-format msgid "group %(group)s" msgstr "группа %(group)s" msgid "" "idp_contact_type must be one of: [technical, other, support, administrative " "or billing." msgstr "" "Значение idp_contact_type должно быть одним из следующих: technical, other, " "support, administrative или billing." #, python-format msgid "invalid date format %s" msgstr "Недопустимый формат даты %s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "Не разрешено использовать два проекта в качестве доменов с одинаковым " "именем: %s" #, python-format msgid "" "it is not permitted to have two projects within a domain with the same " "name : %s" msgstr "" "Не разрешено использовать два проекта в одном домене с одинаковыми именами: " "%s" msgid "only root projects are allowed to act as domains." msgstr "Только корневые проекты могут работать в качестве доменов." 
#, python-format msgid "option %(option)s in group %(group)s" msgstr "параметр %(option)s в группе %(group)s" msgid "provided consumer key does not match stored consumer key" msgstr "переданный ключ приемника не совпадает с сохраненным" msgid "provided request key does not match stored request key" msgstr "переданный ключ запроса не совпадает с сохраненным" msgid "provided verifier does not match stored verifier" msgstr "переданная функция проверки не совпадает с сохраненной" msgid "remaining_uses must be a positive integer or null." msgstr "" "Значение remaining_uses должно быть положительным целым числом или null." msgid "remaining_uses must not be set if redelegation is allowed" msgstr "" "Если включено изменение делегирования, параметр remaining_uses не должен " "быть задан" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "запрос на изменение группы %(group)s, однако переданная конфигурация " "содержит группу %(group_other)s вместо нее" msgid "rescope a scoped token" msgstr "Изменить область помещенного в область маркера" #, python-format msgid "role %s is not defined" msgstr "роль %s не определена" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "scope.project.id необходимо указать, если указан include_subtree" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s не найден или не является каталогом" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s не найден или не является файлом" #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "Ссылка на маркер должна относиться к типу KeystoneToken, а получено %s" msgid "" "update of domain_id is deprecated as of Mitaka and will be removed in O." msgstr "обновление domain_id устарело в Mitaka и будет удалено в O." 
#, python-format msgid "" "validated expected to find %(param_name)r in function signature for " "%(func_name)r." msgstr "" "ожидалось найти проверенный параметр %(param_name)r в подписи функции " "%(func_name)r." keystone-9.0.0/keystone/locale/hu/0000775000567000056710000000000012701407246020241 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/hu/LC_MESSAGES/0000775000567000056710000000000012701407246022026 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000154212701407102026567 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: hu\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Hungarian\n" #, python-format msgid "Unable to open template file %s" msgstr "Nem nyitható meg a sablonfájl: %s" keystone-9.0.0/keystone/locale/en_AU/0000775000567000056710000000000012701407246020614 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/en_AU/LC_MESSAGES/0000775000567000056710000000000012701407246022401 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000155312701407102027144 0ustar jenkinsjenkins00000000000000# Translations template for keystone. 
# Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: en-AU\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: English (Australia)\n" #, python-format msgid "Unable to open template file %s" msgstr "Unable to open template file %s" keystone-9.0.0/keystone/locale/keystone.pot0000664000567000056710000012637312701407105022220 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2016 OpenStack Foundation # This file is distributed under the same license as the keystone project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev1\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-18 06:34+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: keystone/exception.py:83 #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not " "comply with the request since it is either malformed or otherwise " "incorrect. The client is assumed to be in error." 
msgstr "" #: keystone/exception.py:92 #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "" #: keystone/exception.py:99 #, python-format msgid "%(detail)s" msgstr "" #: keystone/exception.py:103 msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" #: keystone/exception.py:112 msgid "" "The 'expires_at' must not be before now. The server could not comply with" " the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" #: keystone/exception.py:121 #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the " "limit of column %(type)s(CHAR(%(length)d))." msgstr "" #: keystone/exception.py:127 #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. " "The server could not comply with the request because the attribute size " "is invalid (too large). The client is assumed to be in error." msgstr "" #: keystone/exception.py:137 #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" #: keystone/exception.py:156 #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server " "could not comply with the request because the password is invalid." msgstr "" #: keystone/exception.py:162 #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions " "have associated endpoints." msgstr "" #: keystone/exception.py:167 msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." 
msgstr "" #: keystone/exception.py:179 msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "" #: keystone/exception.py:189 #, python-format msgid "%(message)s %(amendment)s" msgstr "" #: keystone/exception.py:197 msgid "The request you have made requires authentication." msgstr "" #: keystone/exception.py:203 msgid "Authentication plugin error." msgstr "" #: keystone/exception.py:211 #, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "" #: keystone/exception.py:216 msgid "Attempted to authenticate with an unsupported method." msgstr "" #: keystone/exception.py:224 msgid "Additional authentications steps required." msgstr "" #: keystone/exception.py:232 msgid "You are not authorized to perform the requested action." msgstr "" #: keystone/exception.py:239 #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "" #: keystone/exception.py:244 #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target " "%(target)s" msgstr "" #: keystone/exception.py:249 #, python-format msgid "" "Group membership across backend boundaries is not allowed, group in " "question is %(group_id)s, user is %(user_id)s" msgstr "" #: keystone/exception.py:255 #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service " "or Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, " "Service: %(service_id)s, Region: %(region_id)s" msgstr "" #: keystone/exception.py:262 #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "" #: keystone/exception.py:266 #, python-format msgid "Could not find: %(target)s" msgstr "" #: keystone/exception.py:272 #, python-format msgid "Could not find endpoint: %(endpoint_id)s" msgstr "" #: keystone/exception.py:279 msgid "An unhandled exception has occurred: Could not find metadata." 
msgstr "" #: keystone/exception.py:284 #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "" #: keystone/exception.py:288 msgid "Could not find policy association" msgstr "" #: keystone/exception.py:292 #, python-format msgid "Could not find role: %(role_id)s" msgstr "" #: keystone/exception.py:296 #, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s" msgstr "" #: keystone/exception.py:300 #, python-format msgid "%(role_id)s cannot be an implied roles" msgstr "" #: keystone/exception.py:304 #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" #: keystone/exception.py:310 #, python-format msgid "Could not find region: %(region_id)s" msgstr "" #: keystone/exception.py:314 #, python-format msgid "Could not find service: %(service_id)s" msgstr "" #: keystone/exception.py:318 #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "" #: keystone/exception.py:322 #, python-format msgid "Could not find project: %(project_id)s" msgstr "" #: keystone/exception.py:326 #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "" #: keystone/exception.py:330 #, python-format msgid "Could not find token: %(token_id)s" msgstr "" #: keystone/exception.py:334 #, python-format msgid "Could not find user: %(user_id)s" msgstr "" #: keystone/exception.py:338 #, python-format msgid "Could not find group: %(group_id)s" msgstr "" #: keystone/exception.py:342 #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "" #: keystone/exception.py:346 #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "" #: keystone/exception.py:350 #, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "" #: keystone/exception.py:354 #, python-format msgid "Could not find credential: %(credential_id)s" msgstr "" #: keystone/exception.py:358 #, python-format msgid "Could not find 
version: %(version)s" msgstr "" #: keystone/exception.py:362 #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "" #: keystone/exception.py:366 #, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "" #: keystone/exception.py:370 #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "" #: keystone/exception.py:374 #, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "" #: keystone/exception.py:385 #, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "" #: keystone/exception.py:403 #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "" #: keystone/exception.py:412 msgid "An unexpected error prevented the server from fulfilling your request." msgstr "" #: keystone/exception.py:415 #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "" #: keystone/exception.py:433 #, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." msgstr "" #: keystone/exception.py:438 msgid "" "Expected signing certificates are not available on the server. Please " "check Keystone configuration." msgstr "" #: keystone/exception.py:444 #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" #: keystone/exception.py:449 #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in " "the backend." msgstr "" #: keystone/exception.py:454 #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "" #: keystone/exception.py:458 #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." 
msgstr "" #: keystone/exception.py:464 #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" #: keystone/exception.py:471 msgid "The action you have requested has not been implemented." msgstr "" #: keystone/exception.py:478 msgid "The service you have requested is no longer available on this server." msgstr "" #: keystone/exception.py:485 #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "" #: keystone/exception.py:490 msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap " "one." msgstr "" #: keystone/exception.py:495 #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one " "SQL driver (only one is permitted): %(source)s." msgstr "" #: keystone/exception.py:502 #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration " "repository path at %(path)s doesn't exist or isn't a directory." msgstr "" #: keystone/exception.py:509 msgid "Token version is unrecognizable or unsupported." msgstr "" #: keystone/exception.py:514 #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not " "have xmlsec1 installed, or this is the result of misconfiguration. Reason" " %(reason)s" msgstr "" #: keystone/exception.py:521 msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, " "if running under HTTPd or Apache, ensure WSGIPassAuthorization is set to " "On." msgstr "" #: keystone/exception.py:528 #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." 
msgstr "" #: keystone/exception.py:536 #, python-format msgid "" "The %s extension has been moved into keystone core and as such its " "migrations are maintained by the main keystone database control. Use the " "command: keystone-manage db_sync" msgstr "" #: keystone/exception.py:544 #, python-format msgid "%(driver)s is not supported driver version" msgstr "" #: keystone/notifications.py:232 #, python-format msgid "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" #: keystone/notifications.py:241 #, python-format msgid "Method not callable: %s" msgstr "" #: keystone/assignment/controllers.py:100 keystone/identity/controllers.py:71 #: keystone/resource/controllers.py:90 msgid "Name field is required and cannot be empty" msgstr "" #: keystone/assignment/controllers.py:146 #: keystone/assignment/controllers.py:163 #: keystone/assignment/controllers.py:182 msgid "User roles not supported: tenant_id required" msgstr "" #: keystone/assignment/controllers.py:567 #: keystone/assignment/controllers.py:856 msgid "Specify a domain or project, not both" msgstr "" #: keystone/assignment/controllers.py:570 msgid "Specify one of domain or project" msgstr "" #: keystone/assignment/controllers.py:575 #: keystone/assignment/controllers.py:861 msgid "Specify a user or group, not both" msgstr "" #: keystone/assignment/controllers.py:578 msgid "Specify one of user or group" msgstr "" #: keystone/assignment/controllers.py:845 msgid "Combining effective and group filter will always result in an empty list." msgstr "" #: keystone/assignment/controllers.py:850 msgid "" "Combining effective, domain and inherited filters will always result in " "an empty list." 
msgstr "" #: keystone/assignment/controllers.py:952 msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" #: keystone/assignment/core.py:77 msgid "" "Use of the identity driver config to automatically configure the same " "assignment driver has been deprecated, in the \"O\" release, the " "assignment driver will need to be expicitly configured if different than " "the default (SQL)." msgstr "" #: keystone/assignment/core.py:88 #, python-format msgid "" "Attempted automatic driver selection for assignment based upon " "[identity]\\driver option failed since driver %s is not found. Set " "[assignment]/driver to a valid driver in keystone config." msgstr "" #: keystone/assignment/core.py:179 msgid "Must specify either domain or project" msgstr "" #: keystone/assignment/core.py:848 msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" #: keystone/assignment/core.py:1058 #, python-format msgid "Project (%s)" msgstr "" #: keystone/assignment/core.py:1060 #, python-format msgid "Domain (%s)" msgstr "" #: keystone/assignment/core.py:1062 msgid "Unknown Target" msgstr "" #: keystone/assignment/core.py:1518 msgid "Update of `domain_id` is not allowed." msgstr "" #: keystone/assignment/core.py:1743 msgid "Domain specific roles are not supported in the V8 role driver" msgstr "" #: keystone/assignment/V8_backends/sql.py:287 #: keystone/assignment/backends/sql.py:137 #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "" #: keystone/assignment/V8_backends/sql.py:363 #: keystone/assignment/backends/sql.py:213 #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "" #: keystone/auth/controllers.py:60 #, python-format msgid "" "Direct import of auth plugin %(name)r is deprecated as of Liberty in " "favor of its entrypoint from %(namespace)r and may be removed in N." 
msgstr "" #: keystone/auth/controllers.py:121 #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has " "conflicting values %(new)s and %(old)s" msgstr "" #: keystone/auth/controllers.py:182 msgid "Domain name cannot contain reserved characters." msgstr "" #: keystone/auth/controllers.py:205 msgid "Project name cannot contain reserved characters." msgstr "" #: keystone/auth/controllers.py:355 keystone/middleware/auth.py:130 msgid "Scoping to both domain and project is not allowed" msgstr "" #: keystone/auth/controllers.py:358 msgid "Scoping to both domain and trust is not allowed" msgstr "" #: keystone/auth/controllers.py:361 msgid "Scoping to both project and trust is not allowed" msgstr "" #: keystone/auth/controllers.py:530 msgid "User not found" msgstr "" #: keystone/auth/controllers.py:644 msgid "A project-scoped token is required to produce a service catalog." msgstr "" #: keystone/auth/plugins/external.py:42 msgid "No authenticated user" msgstr "" #: keystone/auth/plugins/external.py:52 #, python-format msgid "Unable to lookup user %s" msgstr "" #: keystone/auth/plugins/external.py:100 msgid "auth_type is not Negotiate" msgstr "" #: keystone/auth/plugins/mapped.py:246 msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must " "be set." 
msgstr "" #: keystone/auth/plugins/oauth1.py:46 msgid "Access token is expired" msgstr "" #: keystone/auth/plugins/oauth1.py:60 msgid "Could not validate the access token" msgstr "" #: keystone/auth/plugins/password.py:39 msgid "Invalid username or password" msgstr "" #: keystone/auth/plugins/token.py:70 keystone/token/controllers.py:160 msgid "rescope a scoped token" msgstr "" #: keystone/auth/plugins/totp.py:96 msgid "Invalid username or TOTP passcode" msgstr "" #: keystone/catalog/controllers.py:215 #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" #: keystone/catalog/core.py:149 keystone/common/ldap/core.py:1411 #, python-format msgid "Duplicate ID, %s." msgstr "" #: keystone/catalog/backends/sql.py:389 #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "" #: keystone/catalog/backends/sql.py:492 msgid "Endpoint Group Project Association not found" msgstr "" #: keystone/cmd/cli.py:173 msgid "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set." msgstr "" #: keystone/cmd/cli.py:586 msgid "At least one option must be provided" msgstr "" #: keystone/cmd/cli.py:593 msgid "--all option cannot be mixed with other options" msgstr "" #: keystone/cmd/cli.py:600 #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "" #: keystone/cmd/cli.py:679 keystone/tests/unit/test_cli.py:411 msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" #: keystone/cmd/cli.py:685 keystone/tests/unit/test_cli.py:427 msgid "The --all option cannot be used with the --domain-name option" msgstr "" #: keystone/cmd/cli.py:710 keystone/tests/unit/test_cli.py:444 #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." 
msgstr "" #: keystone/cmd/cli.py:718 keystone/tests/unit/test_cli.py:385 #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" #: keystone/cmd/cli.py:732 #, python-format msgid "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" #: keystone/cmd/cli.py:765 #, python-format msgid "" "To get a more detailed information on this error, re-run this command for" " the specific domain, i.e.: keystone-manage domain_config_upload " "--domain-name %s" msgstr "" #: keystone/cmd/cli.py:783 #, python-format msgid "Unable to locate domain config directory: %s" msgstr "" #: keystone/cmd/cli.py:803 msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" #: keystone/cmd/cli.py:866 #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "" #: keystone/cmd/cli.py:875 #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "" #: keystone/cmd/cli.py:885 #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "" #: keystone/common/authorization.py:61 keystone/common/wsgi.py:67 #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "" #: keystone/common/clean.py:24 #, python-format msgid "%s cannot be empty." msgstr "" #: keystone/common/clean.py:26 #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." msgstr "" #: keystone/common/clean.py:31 #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." 
msgstr "" #: keystone/common/clean.py:40 #, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "" #: keystone/common/controller.py:349 keystone/common/controller.py:377 #: keystone/identity/core.py:595 keystone/resource/core.py:1143 #, python-format msgid "Expected dict or list: %s" msgstr "" #: keystone/common/controller.py:390 msgid "Marker could not be found" msgstr "" #: keystone/common/controller.py:401 msgid "Invalid limit value" msgstr "" #: keystone/common/controller.py:705 msgid "Cannot change Domain ID" msgstr "" #: keystone/common/controller.py:751 msgid "" "You have tried to create a resource using the admin token. As this token " "is not within a domain you must explicitly include a domain for this " "resource to belong to." msgstr "" #: keystone/common/dependency.py:65 #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "" #: keystone/common/driver_hints.py:38 msgid "" "Cannot truncate a driver call without hints list as first parameter after" " self " msgstr "" #: keystone/common/json_home.py:76 #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "" #: keystone/common/manager.py:82 #, python-format msgid "" "Direct import of driver %(name)r is deprecated as of Liberty in favor of " "its entrypoint from %(namespace)r and may be removed in N." msgstr "" #: keystone/common/tokenless_auth.py:73 msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "" #: keystone/common/tokenless_auth.py:165 msgid "" "User auth cannot be built due to missing either user id, or user name " "with domain id, or user name with domain name." 
msgstr "" #: keystone/common/utils.py:63 msgid "Length of transformable resource id > 64, which is max allowed characters" msgstr "" #: keystone/common/utils.py:192 keystone/credential/controllers.py:44 msgid "Invalid blob in credential" msgstr "" #: keystone/common/wsgi.py:208 msgid "Query string is not UTF-8 encoded" msgstr "" #: keystone/common/wsgi.py:341 #, python-format msgid "%s field is required and cannot be empty" msgstr "" #: keystone/common/wsgi.py:353 #, python-format msgid "%s field(s) cannot be empty" msgstr "" #: keystone/common/wsgi.py:548 msgid "The resource could not be found." msgstr "" #: keystone/common/kvs/core.py:88 #, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "" #: keystone/common/kvs/core.py:123 #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." msgstr "" #: keystone/common/kvs/core.py:166 #, python-format msgid "Key Value Store not configured: %s" msgstr "" #: keystone/common/kvs/core.py:219 msgid "`key_mangler` option must be a function reference" msgstr "" #: keystone/common/kvs/core.py:376 #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "" #: keystone/common/kvs/core.py:380 msgid "Must be called within an active lock context." msgstr "" #: keystone/common/kvs/backends/memcached.py:68 #, python-format msgid "Maximum lock attempts on %s occurred." msgstr "" #: keystone/common/kvs/backends/memcached.py:109 #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "" #: keystone/common/kvs/backends/memcached.py:185 msgid "`key_mangler` functions must be callable." msgstr "" #: keystone/common/ldap/core.py:199 #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" #: keystone/common/ldap/core.py:209 #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. 
Choose one of: %(options)s" msgstr "" #: keystone/common/ldap/core.py:221 #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "" #: keystone/common/ldap/core.py:591 msgid "Invalid TLS / LDAPS combination" msgstr "" #: keystone/common/ldap/core.py:596 #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "" #: keystone/common/ldap/core.py:606 #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "" #: keystone/common/ldap/core.py:618 #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "" #: keystone/common/ldap/core.py:1333 #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "" #: keystone/common/ldap/core.py:1378 #, python-format msgid "LDAP %s create" msgstr "" #: keystone/common/ldap/core.py:1383 #, python-format msgid "LDAP %s update" msgstr "" #: keystone/common/ldap/core.py:1388 #, python-format msgid "LDAP %s delete" msgstr "" #: keystone/common/ldap/core.py:1400 #, python-format msgid "Duplicate name, %s." msgstr "" #: keystone/common/ldap/core.py:1557 msgid "" "Disabling an entity where the 'enable' attribute is ignored by " "configuration." msgstr "" #: keystone/common/ldap/core.py:1568 #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "" #: keystone/common/ldap/core.py:1655 #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "" #: keystone/common/sql/core.py:413 msgid "Duplicate Entry" msgstr "" #: keystone/common/sql/core.py:429 #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "" #: keystone/common/sql/migration_helpers.py:167 msgid "Unable to downgrade schema" msgstr "" #: keystone/common/sql/migration_helpers.py:185 #: keystone/common/sql/migration_helpers.py:231 #, python-format msgid "%s extension does not exist." 
msgstr "" #: keystone/common/validation/__init__.py:44 #, python-format msgid "" "validated expected to find %(param_name)r in function signature for " "%(func_name)r." msgstr "" #: keystone/common/validation/validators.py:53 #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "" #: keystone/contrib/admin_crud/core.py:28 msgid "" "Remove admin_crud_extension from the paste pipeline, the admin_crud " "extension is now always available. Updatethe [pipeline:admin_api] section" " in keystone-paste.ini accordingly, as it will be removed in the O " "release." msgstr "" #: keystone/contrib/ec2/controllers.py:80 keystone/contrib/s3/core.py:111 #: keystone/contrib/s3/core.py:114 msgid "Invalid EC2 signature." msgstr "" #: keystone/contrib/ec2/controllers.py:83 #: keystone/contrib/ec2/controllers.py:87 #: keystone/contrib/ec2/controllers.py:125 msgid "EC2 signature not supplied." msgstr "" #: keystone/contrib/ec2/controllers.py:159 msgid "User not valid for tenant." msgstr "" #: keystone/contrib/ec2/controllers.py:260 msgid "EC2 access key not found." msgstr "" #: keystone/contrib/ec2/controllers.py:326 msgid "Token belongs to another user" msgstr "" #: keystone/contrib/ec2/controllers.py:354 msgid "Credential belongs to another user" msgstr "" #: keystone/contrib/endpoint_filter/routers.py:29 msgid "" "Remove endpoint_filter_extension from the paste pipeline, the endpoint " "filter extension is now always available. Update the [pipeline:api_v3] " "section in keystone-paste.ini accordingly as it will be removed in the O " "release." msgstr "" #: keystone/contrib/federation/routers.py:27 msgid "" "Remove federation_extension from the paste pipeline, the federation " "extension is now always available. Update the [pipeline:api_v3] section " "in keystone-paste.ini accordingly, as it will be removed in the O " "release." 
msgstr "" #: keystone/contrib/oauth1/routers.py:29 msgid "" "Remove oauth1_extension from the paste pipeline, the oauth1 extension is " "now always available. Update the [pipeline:api_v3] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." msgstr "" #: keystone/contrib/revoke/routers.py:27 msgid "" "Remove revoke_extension from the paste pipeline, the revoke extension is " "now always available. Update the [pipeline:api_v3] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." msgstr "" #: keystone/contrib/s3/core.py:82 msgid "Credential signature mismatch" msgstr "" #: keystone/contrib/simple_cert/routers.py:27 msgid "" "Remove simple_cert from the paste pipeline, the PKI and PKIz token " "providers are now deprecated and simple_cert was only used insupport of " "these token providers. Update the [pipeline:api_v3] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." msgstr "" #: keystone/contrib/user_crud/core.py:28 msgid "" "Remove user_crud_extension from the paste pipeline, the user_crud " "extension is now always available. Updatethe [pipeline:public_api] " "section in keystone-paste.ini accordingly, as it will be removed in the O" " release." msgstr "" #: keystone/endpoint_policy/core.py:264 #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "" #: keystone/federation/controllers.py:269 msgid "Request must have an origin query parameter" msgstr "" #: keystone/federation/controllers.py:278 #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "" #: keystone/federation/controllers.py:309 msgid "Missing entity ID from environment" msgstr "" #: keystone/federation/controllers.py:357 msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "" #: keystone/federation/idp.py:486 #, python-format msgid "Cannot open certificate %(cert_file)s. 
Reason: %(reason)s" msgstr "" #: keystone/federation/idp.py:552 msgid "Ensure configuration option idp_entity_id is set." msgstr "" #: keystone/federation/idp.py:555 msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "" #: keystone/federation/idp.py:574 msgid "" "idp_contact_type must be one of: [technical, other, support, " "administrative or billing." msgstr "" #: keystone/federation/utils.py:234 msgid "Federation token is expired" msgstr "" #: keystone/federation/utils.py:286 msgid "Could not find Identity Provider identifier in environment" msgstr "" #: keystone/federation/utils.py:290 msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" #: keystone/federation/utils.py:585 #, python-format msgid "User type %s not supported" msgstr "" #: keystone/federation/utils.py:605 msgid "" "Could not map any federated user properties to identity values. Check " "debug logs or the mapping used for additional details." msgstr "" #: keystone/federation/utils.py:629 #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords " "must be specified." 
msgstr "" #: keystone/federation/utils.py:854 #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "" #: keystone/federation/utils.py:862 #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "" #: keystone/federation/backends/sql.py:182 #, python-format msgid "Duplicate remote ID: %s" msgstr "" #: keystone/federation/backends/sql.py:184 #, python-format msgid "Duplicate entry: %s" msgstr "" #: keystone/identity/controllers.py:74 msgid "Enabled field must be a boolean" msgstr "" #: keystone/identity/controllers.py:103 msgid "Enabled field should be a boolean" msgstr "" #: keystone/identity/core.py:265 #, python-format msgid "Config API entity at /domains/%s/config" msgstr "" #: keystone/identity/core.py:271 #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, " "the last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" #: keystone/identity/core.py:450 keystone/identity/backends/ldap.py:62 #: keystone/identity/backends/ldap.py:64 keystone/identity/backends/ldap.py:70 #: keystone/identity/backends/ldap.py:72 keystone/identity/backends/sql.py:210 #: keystone/identity/backends/sql.py:212 msgid "Invalid user / password" msgstr "" #: keystone/identity/core.py:895 #, python-format msgid "User is disabled: %s" msgstr "" #: keystone/identity/core.py:928 keystone/resource/core.py:375 msgid "update of domain_id is deprecated as of Mitaka and will be removed in O." msgstr "" #: keystone/identity/core.py:947 msgid "Cannot change user ID" msgstr "" #: keystone/identity/backends/ldap.py:35 #, python-format msgid "" "%s for the LDAP identity backend has been deprecated in the Mitaka " "release in favor of read-only identity LDAP access. It will be removed in" " the \"O\" release." 
msgstr "" #: keystone/identity/backends/ldap.py:106 msgid "Cannot change user name" msgstr "" #: keystone/identity/backends/ldap.py:214 keystone/identity/backends/sql.py:292 #: keystone/identity/backends/sql.py:310 #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "" #: keystone/identity/backends/ldap.py:366 #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "" #: keystone/models/token_model.py:62 msgid "Found invalid token: scoped to both project and domain." msgstr "" #: keystone/oauth1/controllers.py:126 msgid "Cannot list request tokens with a token issued via delegation." msgstr "" #: keystone/oauth1/controllers.py:187 keystone/oauth1/backends/sql.py:256 msgid "User IDs do not match" msgstr "" #: keystone/oauth1/controllers.py:243 msgid "Invalid signature" msgstr "" #: keystone/oauth1/controllers.py:294 keystone/oauth1/controllers.py:372 msgid "Request token is expired" msgstr "" #: keystone/oauth1/controllers.py:308 msgid "There should not be any non-oauth parameters" msgstr "" #: keystone/oauth1/controllers.py:312 msgid "provided consumer key does not match stored consumer key" msgstr "" #: keystone/oauth1/controllers.py:316 msgid "provided verifier does not match stored verifier" msgstr "" #: keystone/oauth1/controllers.py:320 msgid "provided request key does not match stored request key" msgstr "" #: keystone/oauth1/controllers.py:324 msgid "Request Token does not have an authorizing user id" msgstr "" #: keystone/oauth1/controllers.py:361 msgid "Cannot authorize a request token with a token issued via delegation." 
msgstr "" #: keystone/oauth1/controllers.py:388 msgid "authorizing user does not have role required" msgstr "" #: keystone/oauth1/controllers.py:401 msgid "User is not a member of the requested project" msgstr "" #: keystone/oauth1/backends/sql.py:91 msgid "Consumer not found" msgstr "" #: keystone/oauth1/backends/sql.py:177 msgid "Request token not found" msgstr "" #: keystone/oauth1/backends/sql.py:237 msgid "Access token not found" msgstr "" #: keystone/resource/controllers.py:94 msgid "The creation of projects acting as domains is not allowed in v2." msgstr "" #: keystone/resource/controllers.py:284 msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same " "time." msgstr "" #: keystone/resource/controllers.py:290 msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same " "time." msgstr "" #: keystone/resource/core.py:106 #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "" #: keystone/resource/core.py:123 msgid "Multiple domains are not supported" msgstr "" #: keystone/resource/core.py:129 msgid "only root projects are allowed to act as domains." msgstr "" #: keystone/resource/core.py:152 #, python-format msgid "" "Cannot create project, since its parent (%(domain_id)s) is acting as a " "domain, but project's specified parent_id (%(parent_id)s) does not match " "this domain_id." msgstr "" #: keystone/resource/core.py:163 #, python-format msgid "" "Cannot create project, since it specifies its owner as domain " "%(domain_id)s, but specifies a parent in a different domain " "(%(parent_domain_id)s)." 
msgstr "" #: keystone/resource/core.py:183 #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" #: keystone/resource/core.py:191 #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: " "%(chars)s" msgstr "" #: keystone/resource/core.py:201 #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" #: keystone/resource/core.py:205 #, python-format msgid "" "it is not permitted to have two projects within a domain with the same " "name : %s" msgstr "" #: keystone/resource/core.py:262 #, python-format msgid "Domain is disabled: %s" msgstr "" #: keystone/resource/core.py:279 #, python-format msgid "Domain cannot be named %s" msgstr "" #: keystone/resource/core.py:282 #, python-format msgid "Domain cannot have ID %s" msgstr "" #: keystone/resource/core.py:297 #, python-format msgid "Project is disabled: %s" msgstr "" #: keystone/resource/core.py:304 #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "" #: keystone/resource/core.py:340 msgid "Update of `parent_id` is not allowed." msgstr "" #: keystone/resource/core.py:345 msgid "Update of `is_domain` is not allowed." msgstr "" #: keystone/resource/core.py:359 msgid "Update of domain_id of projects acting as domains is not allowed." msgstr "" #: keystone/resource/core.py:366 msgid "Update of domain_id is only allowed for root projects." msgstr "" #: keystone/resource/core.py:371 msgid "Cannot update domain_id of a project that has children." msgstr "" #: keystone/resource/core.py:396 #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" #: keystone/resource/core.py:441 msgid "Cascade update is only allowed for enabled attribute." msgstr "" #: keystone/resource/core.py:505 #, python-format msgid "" "cannot delete an enabled project acting as a domain. 
Please disable the " "project %s first." msgstr "" #: keystone/resource/core.py:511 #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use" " the cascade option if you want to delete a whole subtree." msgstr "" #: keystone/resource/core.py:524 #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" #: keystone/resource/core.py:552 msgid "Project field is required and cannot be empty." msgstr "" #: keystone/resource/core.py:793 msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" #: keystone/resource/core.py:1568 msgid "No options specified" msgstr "" #: keystone/resource/core.py:1574 #, python-format msgid "" "The value of group %(group)s specified in the config should be a " "dictionary of options" msgstr "" #: keystone/resource/core.py:1598 #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" #: keystone/resource/core.py:1605 #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" #: keystone/resource/core.py:1612 #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific" " configurations" msgstr "" #: keystone/resource/core.py:1664 msgid "An unexpected error occurred when retrieving domain configs" msgstr "" #: keystone/resource/core.py:1743 keystone/resource/core.py:1826 #: keystone/resource/core.py:1896 keystone/resource/config_backends/sql.py:76 #, python-format msgid "option %(option)s in group %(group)s" msgstr "" #: keystone/resource/core.py:1746 keystone/resource/core.py:1831 #: keystone/resource/core.py:1892 #, python-format msgid "group %(group)s" msgstr "" #: keystone/resource/core.py:1748 msgid "any options" msgstr "" #: keystone/resource/core.py:1791 #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and 
only " "that, option must be specified in the config" msgstr "" #: keystone/resource/core.py:1796 #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" #: keystone/resource/core.py:1805 #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" #: keystone/resource/core.py:1812 #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config " "provided contains option %(option_other)s instead" msgstr "" #: keystone/resource/core.py:2004 #, python-format msgid "Group %s not found in config" msgstr "" #: keystone/resource/core.py:2014 #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not " "supported" msgstr "" #: keystone/revoke/controllers.py:33 #, python-format msgid "invalid date format %s" msgstr "" #: keystone/revoke/core.py:156 msgid "" "The revoke call must not have both domain_id and project_id. This is a " "bug in the Keystone server. The current request is aborted." msgstr "" #: keystone/revoke/core.py:226 keystone/token/provider.py:217 #: keystone/token/provider.py:256 keystone/token/provider.py:336 #: keystone/token/provider.py:343 msgid "Failed to validate token" msgstr "" #: keystone/server/eventlet.py:77 msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of " "running in a WSGI server (e.g. mod_wsgi). Support for keystone under " "eventlet will be removed in the \"M\"-Release." msgstr "" #: keystone/server/eventlet.py:90 #, python-format msgid "Failed to start the %(name)s server" msgstr "" #: keystone/token/controllers.py:372 msgid "Tenant name cannot contain reserved characters." 
msgstr "" #: keystone/token/controllers.py:392 #, python-format msgid "Project ID not found: %(t_id)s" msgstr "" #: keystone/token/controllers.py:396 #, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "" #: keystone/token/controllers.py:415 keystone/token/controllers.py:418 msgid "Token does not belong to specified tenant." msgstr "" #: keystone/token/provider.py:269 keystone/token/provider.py:293 msgid "No token in the request" msgstr "" #: keystone/token/persistence/backends/kvs.py:132 #, python-format msgid "Unknown token version %s" msgstr "" #: keystone/token/providers/common.py:313 #: keystone/token/providers/common.py:445 #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "" #: keystone/token/providers/common.py:318 #: keystone/token/providers/common.py:450 #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "" #: keystone/token/providers/common.py:345 msgid "Trustor is disabled." msgstr "" #: keystone/token/providers/common.py:434 msgid "Trustee has no delegated roles." msgstr "" #: keystone/token/providers/common.py:496 #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "" #: keystone/token/providers/common.py:560 #: keystone/token/providers/common.py:587 msgid "The configured token provider does not support bind authentication." msgstr "" #: keystone/token/providers/common.py:598 msgid "User is not a trustee." msgstr "" #: keystone/token/providers/common.py:665 msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "" #: keystone/token/providers/common.py:675 msgid "Non-default domain is not supported" msgstr "" #: keystone/token/providers/common.py:679 msgid "Domain scoped token is not supported" msgstr "" #: keystone/token/providers/pki.py:53 keystone/token/providers/pkiz.py:31 msgid "Unable to sign token." 
msgstr "" #: keystone/token/providers/fernet/token_formatters.py:102 #, python-format msgid "This is not a recognized Fernet token %s" msgstr "" #: keystone/token/providers/fernet/token_formatters.py:198 #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "" #: keystone/trust/controllers.py:107 #, python-format msgid "role %s is not defined" msgstr "" #: keystone/trust/controllers.py:131 msgid "Redelegation allowed for delegated by trust only" msgstr "" #: keystone/trust/controllers.py:164 msgid "The authenticated user should match the trustor." msgstr "" #: keystone/trust/controllers.py:169 msgid "At least one role should be specified." msgstr "" #: keystone/trust/core.py:58 #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed " "range of [0..%(max_count)d]" msgstr "" #: keystone/trust/core.py:67 #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" #: keystone/trust/core.py:78 msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" #: keystone/trust/core.py:88 msgid "Some of requested roles are not in redelegated trust" msgstr "" #: keystone/trust/core.py:112 msgid "One of the trust agents is disabled or deleted" msgstr "" #: keystone/trust/core.py:131 msgid "remaining_uses must be a positive integer or null." msgstr "" #: keystone/trust/core.py:137 #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than " "allowed %(max_count)d" msgstr "" #: keystone/trust/core.py:144 msgid "remaining_uses must not be set if redelegation is allowed" msgstr "" #: keystone/trust/core.py:154 msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting" " this parameter is advised." 
msgstr "" keystone-9.0.0/keystone/locale/keystone-log-warning.pot0000664000567000056710000002172612701407102024433 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2016 OpenStack Foundation # This file is distributed under the same license as the keystone project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b4.dev83\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-11 06:03+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: keystone/exception.py:66 msgid "missing exception kwargs (programmer error)" msgstr "" #: keystone/assignment/core.py:1393 #, python-format msgid "" "delete_domain_assignments method not found in custom assignment driver. " "Domain assignments for domain (%s) to users from other domains will not " "be removed. This was added in V9 of the assignment driver." msgstr "" #: keystone/auth/controllers.py:468 #, python-format msgid "" "User %(user_id)s doesn't have access to default project %(project_id)s. " "The token will be unscoped rather than scoped to the project." msgstr "" #: keystone/auth/controllers.py:476 #, python-format msgid "" "User %(user_id)s's default project %(project_id)s is disabled. The token " "will be unscoped rather than scoped to the project." msgstr "" #: keystone/auth/controllers.py:485 #, python-format msgid "" "User %(user_id)s's default project %(project_id)s not found. The token " "will be unscoped rather than scoped to the project." msgstr "" #: keystone/cmd/cli.py:455 msgid "" "keystone-manage pki_setup is deprecated as of Mitaka in favor of not " "using PKI tokens and may be removed in 'O' release." 
msgstr "" #: keystone/cmd/cli.py:458 msgid "keystone-manage pki_setup is not recommended for production use." msgstr "" #: keystone/cmd/cli.py:477 msgid "keystone-manage ssl_setup is not recommended for production use." msgstr "" #: keystone/cmd/cli.py:650 #, python-format msgid "Ignoring file (%s) while scanning domain config directory" msgstr "" #: keystone/common/authorization.py:69 msgid "RBAC: Invalid user data in token" msgstr "" #: keystone/common/controller.py:102 keystone/middleware/auth.py:102 msgid "RBAC: Invalid token" msgstr "" #: keystone/common/controller.py:127 keystone/common/controller.py:246 #: keystone/common/controller.py:799 msgid "RBAC: Bypassing authorization" msgstr "" #: keystone/common/controller.py:735 msgid "No domain information specified as part of list request" msgstr "" #: keystone/common/controller.py:771 msgid "" "Not specifying a domain during a create user, group or project call, and " "relying on falling back to the default domain, is deprecated as of " "Liberty and will be removed in the N release. Specify the domain " "explicitly or use a domain-scoped token" msgstr "" #: keystone/common/openssl.py:74 msgid "Failed to invoke ``openssl version``, assuming is v1.0 or newer" msgstr "" #: keystone/common/utils.py:129 #, python-format msgid "Truncating user password to %d characters." msgstr "" #: keystone/common/utils.py:552 msgid "Couldn't find the auth context." msgstr "" #: keystone/common/wsgi.py:252 #, python-format msgid "Authorization failed. %(exception)s from %(remote_addr)s" msgstr "" #: keystone/common/kvs/core.py:153 #, python-format msgid "%s is not a dogpile.proxy.ProxyBackend" msgstr "" #: keystone/common/kvs/core.py:428 #, python-format msgid "KVS lock released (timeout reached) for: %s" msgstr "" #: keystone/common/ldap/core.py:1033 msgid "" "LDAP Server does not support paging. Disable paging in keystone.conf to " "avoid this message." 
msgstr "" #: keystone/common/ldap/core.py:1232 #, python-format msgid "" "Invalid additional attribute mapping: \"%s\". Format must be " ":" msgstr "" #: keystone/common/ldap/core.py:1343 #, python-format msgid "" "ID attribute %(id_attr)s for LDAP object %(dn)s has multiple values and " "therefore cannot be used as an ID. Will get the ID from DN instead" msgstr "" #: keystone/common/ldap/core.py:1704 #, python-format msgid "" "When deleting entries for %(search_base)s, could not delete nonexistent " "entries %(entries)s%(dots)s" msgstr "" #: keystone/endpoint_policy/core.py:94 #, python-format msgid "" "Endpoint %(endpoint_id)s referenced in association for policy " "%(policy_id)s not found." msgstr "" #: keystone/endpoint_policy/core.py:181 #, python-format msgid "" "Unsupported policy association found - Policy %(policy_id)s, Endpoint " "%(endpoint_id)s, Service %(service_id)s, Region %(region_id)s, " msgstr "" #: keystone/endpoint_policy/core.py:197 #, python-format msgid "" "Policy %(policy_id)s referenced in association for endpoint " "%(endpoint_id)s not found." msgstr "" #: keystone/federation/utils.py:606 msgid "Ignoring user name" msgstr "" #: keystone/identity/controllers.py:145 #, python-format msgid "Unable to remove user %(user)s from %(tenant)s." msgstr "" #: keystone/identity/controllers.py:164 #, python-format msgid "Unable to add user %(user)s to %(tenant)s." msgstr "" #: keystone/identity/core.py:131 #, python-format msgid "Invalid domain name (%s) found in config file name" msgstr "" #: keystone/identity/core.py:169 #, python-format msgid "Unable to locate domain config directory: %s" msgstr "" #: keystone/identity/core.py:691 #, python-format msgid "" "Found multiple domains being mapped to a driver that does not support " "that (e.g. 
LDAP) - Domain ID: %(domain)s, Default Driver: %(driver)s" msgstr "" #: keystone/middleware/auth.py:81 msgid "" "build_auth_context middleware checking for the admin token is deprecated " "as of the Mitaka release and will be removed in the O release. If your " "deployment requires use of the admin token, update keystone-paste.ini so " "that admin_token_auth is before build_auth_context in the paste " "pipelines, otherwise remove the admin_token_auth middleware from the " "paste pipelines." msgstr "" #: keystone/middleware/auth.py:195 msgid "" "Auth context already exists in the request environment; it will be used " "for authorization instead of creating a new one." msgstr "" #: keystone/middleware/core.py:63 msgid "" "The admin_token_auth middleware presents a security risk and should be " "removed from the [pipeline:api_v3], [pipeline:admin_api], and " "[pipeline:public_api] sections of your paste ini file." msgstr "" #: keystone/resource/core.py:899 msgid "" "The default domain was created automatically to contain V2 resources. " "This is deprecated in the M release and will not be supported in the O " "release. Create the default domain manually or use the keystone-manage " "bootstrap command." msgstr "" #: keystone/resource/core.py:1948 #, python-format msgid "" "Found what looks like an unmatched config option substitution reference -" " domain: %(domain)s, group: %(group)s, option: %(option)s, value: " "%(value)s. Perhaps the config option to which it refers has yet to be " "added?" msgstr "" #: keystone/resource/core.py:1955 #, python-format msgid "" "Found what looks like an incorrectly constructed config option " "substitution reference - domain: %(domain)s, group: %(group)s, option: " "%(option)s, value: %(value)s." msgstr "" #: keystone/resource/backends/sql.py:222 #, python-format msgid "Project %s does not exist and was not deleted." 
msgstr "" #: keystone/server/common.py:43 msgid "insecure_debug is enabled so responses may include sensitive information." msgstr "" #: keystone/token/persistence/core.py:220 #, python-format msgid "" "`token_api.%s` is deprecated as of Juno in favor of utilizing methods on " "`token_provider_api` and may be removed in Kilo." msgstr "" #: keystone/token/persistence/backends/kvs.py:58 msgid "" "It is recommended to only use the base key-value-store implementation for" " the token driver for testing purposes. Please use 'memcache' or 'sql' " "instead." msgstr "" #: keystone/token/persistence/backends/kvs.py:207 #, python-format msgid "Token `%s` is expired, not adding to the revocation list." msgstr "" #: keystone/token/persistence/backends/kvs.py:250 #, python-format msgid "" "Removing `%s` from revocation list due to invalid expires data in " "revocation list." msgstr "" #: keystone/token/providers/fernet/utils.py:50 #, python-format msgid "[fernet_tokens] key_repository is world readable: %s" msgstr "" #: keystone/token/providers/fernet/utils.py:94 #, python-format msgid "" "Unable to change the ownership of [fernet_tokens] key_repository without " "a keystone user ID and keystone group ID both being provided: %s" msgstr "" #: keystone/token/providers/fernet/utils.py:116 #, python-format msgid "" "Unable to change the ownership of the new key without a keystone user ID " "and keystone group ID both being provided: %s" msgstr "" #: keystone/token/providers/fernet/utils.py:214 msgid "" "[fernet_tokens] max_active_keys must be at least 1 to maintain a primary " "key." msgstr "" #: keystone/version/service.py:77 msgid "'local conf' from PasteDeploy INI is being ignored." 
msgstr "" keystone-9.0.0/keystone/locale/tr_TR/0000775000567000056710000000000012701407246020657 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/tr_TR/LC_MESSAGES/0000775000567000056710000000000012701407246022444 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-error.po0000664000567000056710000001167112701407102026550 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-04 01:50+0000\n" "Last-Translator: İşbaran Akçayır \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" msgid "Cannot retrieve Authorization headers" msgstr "Yetkilendirme başlıkları alınamıyor" #, python-format msgid "" "Circular reference or a repeated entry found in projects hierarchy - " "%(project_id)s." msgstr "" "Proje sıra düzeninde çember başvuru ya da tekrar eden girdi bulundu - " "%(project_id)s." #, python-format msgid "" "Circular reference or a repeated entry found in region tree - %(region_id)s." msgstr "" "Bölge ağacında çember başvuru ya da tekrar eden girdi bulundu - " "%(region_id)s." #, python-format msgid "" "Circular reference or a repeated entry found projects hierarchy - " "%(project_id)s." msgstr "" "Proje sıra düzeninde çember başvuru ya da tekrar eden girdi bulundu - " "%(project_id)s." 
#, python-format msgid "Could not bind to %(host)s:%(port)s" msgstr "%(host)s:%(port)s adresine bağlanılamadı" #, python-format msgid "" "Either [fernet_tokens] key_repository does not exist or Keystone does not " "have sufficient permission to access it: %s" msgstr "" "[fernet_tokents] key_repository mevcut değil ya da Keystone erişmek için " "yeterli izine sahip değil: %s" msgid "" "Error setting up the debug environment. Verify that the option --debug-url " "has the format : and that a debugger processes is listening on " "that port." msgstr "" "Hata ayıklama ortamının ayarlanmasında hata. --debug-url seçeneğinin " ": biçimine sahip olduğunu ve bu bağlantı " "noktasında hata ayıklama sürecinin dinlediğini doğrulayın." msgid "Failed to construct notifier" msgstr "Bildirici inşa etme başarısız" msgid "" "Failed to create [fernet_tokens] key_repository: either it already exists or " "you don't have sufficient permissions to create it" msgstr "" "[fernet_tokens] key_repository oluşturulamıyor: ya zaten mevcut ya da " "oluşturmak için yeterli izniniz yok" #, python-format msgid "Failed to remove file %(file_path)r: %(error)s" msgstr "%(file_path)r dosyası silinemedi: %(error)s" #, python-format msgid "Failed to send %(action)s %(event_type)s notification" msgstr "%(action)s %(event_type)s bildirimi gönderilemedi" #, python-format msgid "Failed to send %(res_id)s %(event_type)s notification" msgstr "%(res_id)s %(event_type)s bildirimi gönderilemedi" msgid "Failed to validate token" msgstr "Jeton doğrulama başarısız" #, python-format msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s" msgstr "Kusurlu bitiş noktası %(url)s - bilinmeyen anahtar %(keyerror)s" #, python-format msgid "" "Malformed endpoint %s - incomplete format (are you missing a type notifier ?)" msgstr "" "Kusurlu bitiş noktası %s - tamamlanmamış biçim (bir tür bildiriciniz eksik " "olabilir mi ?)" #, python-format msgid "" "Malformed endpoint '%(url)s'. 
The following type error occurred during " "string substitution: %(typeerror)s" msgstr "" "Kusurlu bitiş noktası '%(url)s'. Karakter dizisi yer değiştirme sırasında şu " "tür hatası oluştu: %(typeerror)s" #, python-format msgid "Malformed endpoint - %(url)r is not a string" msgstr "Kusurlu bitiş noktası - %(url)r bir karakter dizisi değil" #, python-format msgid "" "Reinitializing revocation list due to error in loading revocation list from " "backend. Expected `list` type got `%(type)s`. Old revocation list data: " "%(list)r" msgstr "" "Arka uçtan feshetme listesi yüklemedeki hata sebebiyle fesih listesi yeniden " "ilklendiriliyor. `list` beklendi `%(type)s` alındı. Eski fesih listesi " "verisi: %(list)r" msgid "Server error" msgstr "Sunucu hatası" #, python-format msgid "Unable to convert Keystone user or group ID. Error: %s" msgstr "Keystone kullanıcı veya grup kimliği dönüştürülemiyor. Hata: %s" msgid "Unable to sign token" msgstr "Jeton imzalanamıyor" #, python-format msgid "Unexpected error or malformed token determining token expiry: %s" msgstr "Jeton sona erme belirlemede beklenmeyen hata veya kusurlu jeton: %s" #, python-format msgid "" "Unexpected results in response for domain config - %(count)s responses, " "first option is %(option)s, expected option %(expected)s" msgstr "" "Alan yapılandırması yanıtında beklenmedik sonuçlar - %(count)s yanıt, ilk " "seçenek %(option)s, beklenen seçenek %(expected)s" keystone-9.0.0/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000154212701407102027205 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-04 01:49+0000\n" "Last-Translator: İşbaran Akçayır \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "Unable to open template file %s" msgstr "%s şablon dosyası açılamıyor" keystone-9.0.0/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-warning.po0000664000567000056710000002074312701407102027064 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-03 12:54+0000\n" "Last-Translator: openstackjenkins \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "%s is not a dogpile.proxy.ProxyBackend" msgstr "%s dogpile.proxy.ProxyBackend değil" #, python-format msgid "Authorization failed. %(exception)s from %(remote_addr)s" msgstr "Yetkilendirme başarısız. %(remote_addr)s den %(exception)s" #, python-format msgid "" "Endpoint %(endpoint_id)s referenced in association for policy %(policy_id)s " "not found." msgstr "" "%(policy_id)s ile ilişkisi için başvurulan bitiş noktası %(endpoint_id)s " "bulunamadı." 
msgid "Failed to invoke ``openssl version``, assuming is v1.0 or newer" msgstr "" "``openssl version`` çalıştırılamadı, v1.0 ya da daha yeni olarak varsayılıyor" #, python-format msgid "" "Found multiple domains being mapped to a driver that does not support that " "(e.g. LDAP) - Domain ID: %(domain)s, Default Driver: %(driver)s" msgstr "" "Bunu desteklemeyen bir sürücüye eşleştirilen birden fazla alan bulundu (örn. " "LDAP) - Alan ID: %(domain)s, Varsayılan Sürücü: %(driver)s" #, python-format msgid "" "Found what looks like an incorrectly constructed config option substitution " "reference - domain: %(domain)s, group: %(group)s, option: %(option)s, value: " "%(value)s." msgstr "" "Düzgün inşa edilmemiş yapılandırma seçeneği yer değiştirme referansına " "benzeyen bir şey bulundu - alan: %(domain)s, grup: %(group)s, seçenek: " "%(option)s, değer: %(value)s." #, python-format msgid "" "Found what looks like an unmatched config option substitution reference - " "domain: %(domain)s, group: %(group)s, option: %(option)s, value: %(value)s. " "Perhaps the config option to which it refers has yet to be added?" msgstr "" "Eşleşmemiş yapılandırma seçeneği yer değiştirme referansı gibi görünen bir " "şey bulundu - alan: %(domain)s, grup: %(group)s, seçenek: %(option)s, değer: " "%(value)s. Belki başvurduğu yapılandırma seçeneği henüz eklenmemiştir?" #, python-format msgid "Ignoring file (%s) while scanning domain config directory" msgstr "Alan yapılandırma dizini taranırken dosya (%s) atlanıyor" msgid "Ignoring user name" msgstr "Kullanıcı adı atlanıyor" #, python-format msgid "" "Invalid additional attribute mapping: \"%s\". Format must be " ":" msgstr "" "Geçersiz ek öznitelik eşleştirmesi: \"%s\". 
Biçim :" " olmalı" #, python-format msgid "Invalid domain name (%s) found in config file name" msgstr "Yapılandırma dosyası isminde geçersiz alan adı (%s) bulundu" msgid "" "It is recommended to only use the base key-value-store implementation for " "the token driver for testing purposes. Please use 'memcache' or 'sql' " "instead." msgstr "" "Jeton sürücüsü için temel anahtar-değer-depolama uygulamasının yalnızca test " "amaçlı kullanımı önerilir. Lütfen 'memcache' ya da 'sql' kullanın." #, python-format msgid "KVS lock released (timeout reached) for: %s" msgstr "KVS kilidi kaldırıldı (zaman aşımına uğradı): %s" msgid "" "LDAP Server does not support paging. Disable paging in keystone.conf to " "avoid this message." msgstr "" "LDAP Sunucu sayfalamayı desteklemiyor. Bu iletiyi almamak için sayfalamayı " "keystone.conf'da kapatın." msgid "No domain information specified as part of list request" msgstr "Listeleme isteğinin parçası olarak alan bilgisi belirtilmedi" #, python-format msgid "" "Policy %(policy_id)s referenced in association for endpoint %(endpoint_id)s " "not found." msgstr "" "%(endpoint_id)s bitiş noktası için ilişkisi için başvurulan %(policy_id)s " "ilkesi bulunamadı." msgid "RBAC: Bypassing authorization" msgstr "RBAC: Yetkilendirme baypas ediliyor" msgid "RBAC: Invalid token" msgstr "RBAC: Geçersiz jeton" msgid "RBAC: Invalid user data in token" msgstr "RBAC: Jetonda geçersiz kullanıcı verisi" #, python-format msgid "" "Removing `%s` from revocation list due to invalid expires data in revocation " "list." msgstr "" "feshetme listesindeki geçersiz sona erme tarihi verisi sebebiyle `%s` " "feshetme listesinden kaldırılıyor." #, python-format msgid "Token `%s` is expired, not adding to the revocation list." msgstr "`%s` jetonunun süresi dolmuş, feshetme listesine eklenmiyor." #, python-format msgid "Truncating user password to %d characters." msgstr "Kullanıcı parolası %d karaktere kırpılıyor." 
#, python-format msgid "Unable to add user %(user)s to %(tenant)s." msgstr "Kullanıcı %(user)s %(tenant)s'e eklenemiyor." #, python-format msgid "" "Unable to change the ownership of [fernet_tokens] key_repository without a " "keystone user ID and keystone group ID both being provided: %s" msgstr "" "Hem keystone kullanıcı kimliği hem keystone grup kimliği verilmeden " "[fernet_tokens] key_repository sahipliği değiştirilemiyor: %s" #, python-format msgid "" "Unable to change the ownership of the new key without a keystone user ID and " "keystone group ID both being provided: %s" msgstr "" "Hem keystone kullanıcı kimliği hem keystone grup kimliği verilmeden yeni " "anahtarın sahipliği değiştirilemiyor: %s" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "Alan yapılandırma dizini bulunamadı: %s" #, python-format msgid "Unable to remove user %(user)s from %(tenant)s." msgstr "Kullanıcı %(user)s %(tenant)s'den çıkarılamadı." #, python-format msgid "" "Unsupported policy association found - Policy %(policy_id)s, Endpoint " "%(endpoint_id)s, Service %(service_id)s, Region %(region_id)s, " msgstr "" "Desteklenmeyen ilke ilişkilendirmesi bulundu - İlke %(policy_id)s, Bitiş " "noktası %(endpoint_id)s, Servis %(service_id)s, Bölge %(region_id)s, " #, python-format msgid "" "User %(user_id)s doesn't have access to default project %(project_id)s. The " "token will be unscoped rather than scoped to the project." msgstr "" "%(user_id)s kullanıcısı varsayılan proje %(project_id)s erişimine sahip " "değil. Jeton projeye kapsamsız olacak, kapsamlı değil." #, python-format msgid "" "User %(user_id)s's default project %(project_id)s is disabled. The token " "will be unscoped rather than scoped to the project." msgstr "" "%(user_id)s kullanıcısının varsayılan projesi %(project_id)s kapalı. Jeton " "projeye kapsamsız olacak, kapsamlı değil." #, python-format msgid "" "User %(user_id)s's default project %(project_id)s not found. 
The token will " "be unscoped rather than scoped to the project." msgstr "" "%(user_id)s kullanıcısının varsayılan projesi %(project_id)s bulunamadı. " "Jeton projeye kapsamsız olacak, kapsamlı değil." #, python-format msgid "" "When deleting entries for %(search_base)s, could not delete nonexistent " "entries %(entries)s%(dots)s" msgstr "" "%(search_base)s için girdiler silinirken, mevcut olmayan girdiler %(entries)s" "%(dots)s silinemedi" #, python-format msgid "[fernet_tokens] key_repository is world readable: %s" msgstr "[fernet_tokens] key_repository herkesçe okunabilir: %s" msgid "" "[fernet_tokens] max_active_keys must be at least 1 to maintain a primary key." msgstr "" "[fernet_tokens] max_active_keys bir birincil anahtarı korumak için en az 1 " "olmalı." #, python-format msgid "" "`token_api.%s` is deprecated as of Juno in favor of utilizing methods on " "`token_provider_api` and may be removed in Kilo." msgstr "" "`token_provider_api` üzerindeki yöntemlerden faydalanmak için `token_api.%s` " "Juno'dan sonra tercih edilmeyecek ve Kilo'da kaldırılabilir." msgid "keystone-manage pki_setup is not recommended for production use." msgstr "keystone-manage pki_setup üretimde kullanmak için tavsiye edilmez." msgid "keystone-manage ssl_setup is not recommended for production use." msgstr "keystone-manage ssl_setup üretimde kullanmak için tavsiye edilmez." msgid "missing exception kwargs (programmer error)" msgstr "istisna kwargs eksik (programcı hatası)" keystone-9.0.0/keystone/locale/tr_TR/LC_MESSAGES/keystone.po0000664000567000056710000011254212701407102024641 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Alper Çiftçi , 2015 # Andreas Jaeger , 2015 # catborise , 2013 # catborise , 2013 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b4.dev37\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-07 18:22+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-03 12:54+0000\n" "Last-Translator: openstackjenkins \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s geçerli bir bilgilendirme olayı değil, şunlardan biri olmalı: " "%(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s güvenilir bir gösterge paneli istemcisi değil" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s veri tabanı göçü sağlamıyor. %(path)s yolundaki göç deposu yolu " "mevcut değil ya da bir dizin değil." #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." msgstr "%(property_name)s %(min_length)s karakterden az olamaz." #, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "%(property_name)s bir %(display_expected_type)s değil" #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." msgstr "%(property_name)s %(max_length)s karakterden büyük olmamalı." #, python-format msgid "%s cannot be empty." msgstr "%s boş olamaz." #, python-format msgid "%s extension does not exist." msgstr "%s eklentisi mevcut değil." 
#, python-format msgid "%s field is required and cannot be empty" msgstr "%s alanı gerekli ve boş olamaz" #, python-format msgid "%s field(s) cannot be empty" msgstr "%s alan(lar)ı boş olamaz" msgid "--all option cannot be mixed with other options" msgstr "--all seçeneği diğer seçeneklerle birleştirilemez" msgid "A project-scoped token is required to produce a service catalog." msgstr "Servis kataloğu oluşturmak için proje-kapsamlı bir jeton gerekli." msgid "Access token is expired" msgstr "Erişim jetonunun süresi dolmuş" msgid "Access token not found" msgstr "Erişim jetonu bulunamadı" msgid "Additional authentications steps required." msgstr "Ek kimlik doğrulama adımları gerekli." msgid "An unexpected error occurred when retrieving domain configs" msgstr "Alan yapılandırmaları alınırken beklenmedik hata oluştu" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "%s depolanırken beklenmedik bir hata oluştu" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "Beklenmedik bir hata sunucunun isteğinizi tamamlamasını engelledi." #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "" "Beklenmedik bir hata sunucunun isteğinizi tamamlamasını engelledi: " "%(exception)s" msgid "An unhandled exception has occurred: Could not find metadata." msgstr "Ele alınmayan istisna oluştu: Metadata bulunamadı." msgid "At least one option must be provided" msgstr "En az bir seçenek sağlanmalıdır" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "En az bir seçenek sağlanmalıdır, ya --all ya da --domain-name kullanın" msgid "At least one role should be specified." msgstr "En az bir kural belirtilmeli." msgid "Attempted to authenticate with an unsupported method." msgstr "Desteklenmeyen yöntem ile doğrulama girişiminde bulunuldu." 
msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "" "OS-FEDERATION jetonu V2 Kimlik Servisi ile kullanılmaya çalışılıyor, V3 " "Kimlik Doğrulama kullanın" msgid "Authentication plugin error." msgstr "Kimlik doğrulama eklenti hatası." #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "" "Arka uç `%(backend)s` geçerli bir memcached arka ucu değil. Geçerli arka " "uçlar: %(backend_list)s" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "Vekil ile sağlanan bir jeton ile istek yetkilendirilemez." #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "%(option_name)s %(attr)s değiştirilemiyor" msgid "Cannot change Domain ID" msgstr "Alan ID'si değiştirilemez" msgid "Cannot change user ID" msgstr "Kullanıcı ID'si değiştirilemiyor" msgid "Cannot change user name" msgstr "Kullanıcı adı değiştirilemiyor" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "%(url)s geçersiz URL' si ile bir bitiş noktası yaratılamıyor" #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "Üst proje %(project_id)s ye sahip proje oluşturulamıyor" msgid "Cannot list request tokens with a token issued via delegation." msgstr "Vekalet ile sağlanan bir jeton ile istek jetonları listelenemez." #, python-format msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s" msgstr "Sertifika %(cert_file)s açılamıyor. Sebep: %(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Verilmemiş rol silinemez, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "self'den sonra ilk parametre olarak ipucu listesi verilmeden bir sürücü " "çağrısı kırpılamıyor " msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." 
msgstr "" "parents_as_list ve parents_as_ids sorgu parametreleri aynı anda kullanılamaz." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "subtree_as_list ve subtree_as_ids sorgu parametreleri aynı anda kullanılamaz." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "Efektif ve grup filtresini birleştirmek her zaman boş bir listeye yol açar." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "Efektif, alan ve miras filtrelerin birleştirilmesi her zaman boş bir listeye " "yol açar." #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "%(type)s depolanırken çatışma oluştu- %(details)s" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "Çatışan bölge kimlikleri belirtildi: \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "Tüketici bulunamadı" #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target %(target)s" msgstr "" "%(target)s hedefindeki değişmez öznitelik(ler) '%(attributes)s' " "değiştirilemiyor" #, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "" "%(domain_id)s alanı için alan yapılandırmasında %(group_or_option)s " "bulunamadı" #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "Bitişnoktası Grubu bulunamadı: %(endpoint_group_id)s" msgid "Could not find Identity Provider identifier in environment" msgstr "Kimlik Sağlayıcı tanımlayıcısı ortamda bulunamıyor" #, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "Kimlik Sağlayıcı bulunamadı: %(idp_id)s" #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "Servis Sağlayıcı bulunamadı: %(sp_id)s" #, python-format msgid "Could not find credential: %(credential_id)s" 
msgstr "Kimlik bilgisi bulunamadı: %(credential_id)s" #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "Alan bulunamadı: %(domain_id)s" #, python-format msgid "Could not find endpoint: %(endpoint_id)s" msgstr "Bitiş noktası bulunamadı: %(endpoint_id)s" #, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "" "Kimlik Sağlayıcı: %(idp_id)s için birleşmiş iletişim kuralı %(protocol_id)s " "bulunamadı" #, python-format msgid "Could not find group: %(group_id)s" msgstr "Grup bulunamadı: %(group_id)s" #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "Eşleştirme bulunamadı: %(mapping_id)s" msgid "Could not find policy association" msgstr "İlke ilişkilendirme bulunamadı" #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "İlke bulunamadı: %(policy_id)s" #, python-format msgid "Could not find project: %(project_id)s" msgstr "Proje bulunamadı: %(project_id)s" #, python-format msgid "Could not find region: %(region_id)s" msgstr "Bölge bulunamadı: %(region_id)s" #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" "Rol: %(role_id)s, kullanıcı veya grup: %(actor_id)s, proje veya alan: " "%(target_id)s ile rol ataması bulunamadı" #, python-format msgid "Could not find role: %(role_id)s" msgstr "Rol bulunamadı: %(role_id)s" #, python-format msgid "Could not find service: %(service_id)s" msgstr "Servis bulunamadı: %(service_id)s" #, python-format msgid "Could not find token: %(token_id)s" msgstr "Jeton bulunamadı: %(token_id)s" #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "Güven bulunamadı: %(trust_id)s" #, python-format msgid "Could not find user: %(user_id)s" msgstr "Kullanıcı bulunamadı: %(user_id)s" #, python-format msgid "Could not find version: %(version)s" msgstr "Sürüm bulunamadı: %(version)s" #, python-format msgid "Could 
not find: %(target)s" msgstr "Bulunamadı: %(target)s" msgid "Could not validate the access token" msgstr "Erişim jetonu doğrulanamadı" msgid "Credential belongs to another user" msgstr "Kimlik bilgisi başka bir kullanıcıya ait" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "'enable' özniteliği yapılandırma tarafından göz ardı edilen bir öğe " "kapatılıyor." #, python-format msgid "Domain (%s)" msgstr "Alan (%s)" #, python-format msgid "Domain cannot be named %s" msgstr "Alan %s olarak adlandırılamaz" #, python-format msgid "Domain cannot have ID %s" msgstr "Alan %s ID'sine sahip olamaz" #, python-format msgid "Domain is disabled: %s" msgstr "Alan kapalı: %s" msgid "Domain scoped token is not supported" msgstr "Alan kapsamlı jeton desteklenmiyor" #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "Alan: %(domain)s zaten tanımlanmış bir yapılandırmaya sahip - dosya " "atlanıyor: %(file)s." msgid "Duplicate Entry" msgstr "Kopya Girdi" #, python-format msgid "Duplicate ID, %s." msgstr "Kopya ID, %s" #, python-format msgid "Duplicate name, %s." msgstr "Kopya isim, %s." msgid "Enabled field must be a boolean" msgstr "Etkin alan bool olmalı" msgid "Enabled field should be a boolean" msgstr "Etkin alan bool olmalı" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Bitiş noktası %(endpoint_id)s %(project_id)s projesinde bulunamadı" msgid "Endpoint Group Project Association not found" msgstr "Bitiş Noktası Grup Proje İlişkisi bulunamadı" msgid "Ensure configuration option idp_entity_id is set." msgstr "idp_entity_id yapılandırma seçeneğinin ayarlandığına emin olun." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "idp_sso_endpoint yapılandırma seçeneğinin ayarlandığına emin olun." #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." 
msgstr "" "Alan: %(domain)s için yapılandırma dosyası ayrıştırılırken hata, dosya: " "%(file)s." #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Dosya açılırken hata %(path)s: %(err)s" #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "Satır ayrıştırılırken hata: '%(line)s': %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Kurallar ayrıştırılırken hata %(path)s: %(err)s" #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "Metadata dosyası okunurken hata, %(reason)s" #, python-format msgid "Expected dict or list: %s" msgstr "Sözlük ya da liste beklendi: %s" msgid "" "Expected signing certificates are not available on the server. Please check " "Keystone configuration." msgstr "" "Beklenen imzalama sertifikaları sunucuda kullanılabilir değil. Lütfen " "Keystone yapılandırmasını kontrol edin." #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "%(target)s içinde %(attribute)s bulunması bekleniyordu - sunucu talebi " "yerine getiremedi çünkü ya istek kusurluydu ya da geçersizdi. İstemcinin " "hatalı olduğu varsayılıyor." #, python-format msgid "Failed to start the %(name)s server" msgstr "%(name)s sunucusu başlatılamadı" msgid "Failed to validate token" msgstr "Jeton doğrulama başarısız" msgid "Federation token is expired" msgstr "Federasyon jetonunun süresi dolmuş" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "\"remaining_uses\" alanı %(value)s olarak ayarlanmış, bir güvene tekrar " "yetki vermek için böyle ayarlanmamalı" msgid "Found invalid token: scoped to both project and domain." msgstr "Geçersiz jeton bulundu: hem proje hem alana kapsanmış." 
#, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "%(group)s grubu alana özel yapılandırmalar için desteklenmiyor" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "%(mapping_id)s eşleştirmesi tarafından döndürülen %(group_id)s grubu arka " "uçta bulunamadı." #, python-format msgid "" "Group membership across backend boundaries is not allowed, group in question " "is %(group_id)s, user is %(user_id)s" msgstr "" "Arka uç sınırları arasında grup üyeliğine izin verilmez, sorudaki grup " "%(group_id)s, kullanıcı ise %(user_id)s" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "ID özniteliği %(id_attr)s %(dn)s LDAP nesnesinde bulunamadı" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "Kimlik Sağlayıcı %(idp)s kapalı" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" "Gelen kimlik sağlayıcı tanımlayıcısı kabul edilen tanımlayıcılar arasında " "yok." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "Geçersiz LDAP TLS sertifika seçeneği: %(option)s. Şunlardan birini seçin: " "%(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Geçersiz LDAP TLS_AVAIL seçeneği: %s. TLS kullanılabilir değil" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "Geçersiz LDAP referans kaldırma seçeneği: %(option)s. Şunlardan birini " "seçin: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "Geçersiz LDAP kapsamı: %(scope)s. 
Şunlardan birini seçin: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Geçersiz TLS / LDAPS kombinasyonu" #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "Geçersiz denetim bilgisi veri türü: %(data)s (%(type)s)" msgid "Invalid blob in credential" msgstr "Kimlik bilgisinde geçersiz düğüm" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Yapılandırma dosyası isminde: %(file)s geçersiz alan adı: %(domain)s bulundu " "- bu dosya atlanıyor." #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "Geçersiz alana özel yapılandırma: %(reason)s" #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "'%(path)s' alanı için geçersiz girdi. Değer '%(value)s'." msgid "Invalid limit value" msgstr "Geçersiz sınır değeri" #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service or " "Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s" msgstr "" "İlke ilişkilendirmeleri için geçersiz öğe karışımı - yalnızca Bitişnoktası, " "Servis veya Bölge+Servise izin verilir. İstek şuydu Bitişnoktası: " "%(endpoint_id)s, Servis: %(service_id)s, Bölge: %(region_id)s" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Geçersiz kural: %(identity_value)s. Hem 'gruplar' hem 'alan' anahtar " "kelimeleri belirtilmeli." msgid "Invalid signature" msgstr "Geçersiz imza" msgid "Invalid user / password" msgstr "Geçersiz kullanıcı / parola" msgid "Invalid username or password" msgstr "Geçersiz kullanıcı adı ve parola" #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." msgstr "KVS bölgesi %s zaten yapılandırılmış. Yeniden yapılandırılamıyor." 
#, python-format msgid "Key Value Store not configured: %s" msgstr "Anahtar Değer Deposu yapılandırılmamış: %s" #, python-format msgid "LDAP %s create" msgstr "LDAP %s oluştur" #, python-format msgid "LDAP %s delete" msgstr "LDAP %s sil" #, python-format msgid "LDAP %s update" msgstr "LDAP %s güncelle" #, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "Anahtar için Kilit Zaman Aşımı oluştu, %(target)s" #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "Kilit anahtarı hedef anahtarla eşleşmeli: %(lock)s != %(target)s" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "Kusurlu bitiş noktası URL'si (%(endpoint)s), detaylar için HATA kaydına " "bakın." msgid "Marker could not be found" msgstr "İşaretçi bulunamadı" #, python-format msgid "Maximum lock attempts on %s occurred." msgstr "%s üzerinde azami kilit girişimi yapıldı." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "Üye %(member)s zaten %(group)s grubunun üyesi" #, python-format msgid "Method not callable: %s" msgstr "Metod çağrılabilir değil: %s" msgid "Missing entity ID from environment" msgstr "Öğe kimliği ortamdan eksik" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "Tekrar yetkilendirme üzerine \"redelegation_count\" değiştirmeye izin " "verilmez. Tavsiye edildiği gibi bu parametre atlanıyor." msgid "Multiple domains are not supported" msgstr "Birden çok alan desteklenmiyor" msgid "Must be called within an active lock context." msgstr "Etkin kilik içeriği içinde çağrılmalı." 
msgid "Must specify either domain or project" msgstr "Alan ya da projeden biri belirtilmelidir" msgid "Name field is required and cannot be empty" msgstr "İsim alanı gerekli ve boş olamaz" msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, if " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "Yetkilendirme başlıkları bulunamadı, OAuth ile ilişkili çağrılarla devam " "edilemez, HTTPd veya Apache altında çalışıyorsanız, WSGIPassAuthorization " "ayarını açtığınızdan emin olun." msgid "No authenticated user" msgstr "Kimlik denetimi yapılmamış kullanıcı" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "Şifreleme anahtarları bulundu; birini yükletmek için keystone-manage " "fernet_setup çalıştırın." msgid "No options specified" msgstr "Hiçbir seçenek belirtilmedi" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "Hiçbir ilke %(endpoint_id)s bitiş noktasıyla ilişkilendirilmemiş." 
#, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "Güven için kalan kullanım alanı yok: %(trust_id)s" msgid "Non-default domain is not supported" msgstr "Varsayılan olmayan alan desteklenmiyor" msgid "One of the trust agents is disabled or deleted" msgstr "Güven ajanlarından biri kapalı ya da silinmiş" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "%(option)s seçeneği alan yapılandırma isteği kontrol edilirken hiçbir grup " "belirtilmemiş şekilde bulundu" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "%(group)s grubundaki %(option)s seçeneği alana özel yapılandırmalarda " "desteklenmiyor" #, python-format msgid "Project (%s)" msgstr "Proje (%s)" #, python-format msgid "Project ID not found: %(t_id)s" msgstr "Proje kimliği bulunamadı: %(t_id)s" msgid "Project field is required and cannot be empty." msgstr "Proje alanı gerekli ve boş olamaz." #, python-format msgid "Project is disabled: %s" msgstr "Proje kapalı: %s" msgid "Redelegation allowed for delegated by trust only" msgstr "" "Tekrar yetki vermeye yalnızca güven tarafından yetki verilenler için izin " "verilir" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "izin verilen [0..%(max_count)d] aralığı içinden %(redelegation_depth)d izin " "verilen tekrar yetki verme derinliği" msgid "Request Token does not have an authorizing user id" msgstr "İstek Jetonu yetki veren bir kullanıcı id'sine sahip değil" #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. The " "server could not comply with the request because the attribute size is " "invalid (too large). The client is assumed to be in error." msgstr "" "İstek özniteliği %(attribute)s %(size)i boyutuna eşit ya da daha küçük " "olmalı. 
Sunucu talebi yerine getiremedi çünkü öznitelik boyutu geçersiz (çok " "büyük). İstemcinin hata durumunda olduğu varsayılıyor." msgid "Request must have an origin query parameter" msgstr "İstek bir başlangıç noktası sorgu parametresine sahip olmalı" msgid "Request token is expired" msgstr "İstek jetonunun süresi dolmuş" msgid "Request token not found" msgstr "İstek jetonu bulunamadı" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "İstenen zaman bitim süresi tekrar yetkilendirilen güvenin " "sağlayabileceğinden fazla" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "%(requested_count)d istenen tekrar yetki verme derinliği izin verilen " "%(max_count)d den fazla" msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of running " "in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will " "be removed in the \"M\"-Release." msgstr "" "Bir WSGI sunucuda (örn. mod_wsgi) çalıştırmak adına, keystone'nin eventlet " "ile çalıştırılması Kilo'dan sonra desteklenmiyor. Eventlet altında keystone " "desteği \"M\"-Sürümünde kaldırılacak." 
msgid "Scoping to both domain and project is not allowed" msgstr "Hem alan hem projeye kapsamlamaya izin verilmez" msgid "Scoping to both domain and trust is not allowed" msgstr "Hem alan hem güvene kapsamlamaya izin verilmez" msgid "Scoping to both project and trust is not allowed" msgstr "Hem proje hem güvene kapsamlamaya izin verilmez" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "Servis Sağlayıcı %(sp)s kapalı" msgid "Some of requested roles are not in redelegated trust" msgstr "İstenen rollerin bazıları tekrar yetki verilen güven içinde değil" msgid "Specify a domain or project, not both" msgstr "Bir alan ya da proje belirtin, ya da her ikisini" msgid "Specify a user or group, not both" msgstr "Bir kullanıcı ya da grup belirtin, ikisini birden değil" msgid "Specify one of domain or project" msgstr "Alandan ya da projeden birini belirtin" msgid "Specify one of user or group" msgstr "Kullanıcı ya da grup belirtin" #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "Karakter dizisi uzunluğu aşıldı. '%(string)s' karakter dizisiz uzunluğu " "%(type)s(CHAR(%(length)d)) sütunu sınırını aşıyor." msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "'expires_at' şu andan önce olmamalı. Sunucu talebi yerine getiremedi çünkü " "istek ya kusurlu ya da geçersiz. İstemcinin hata durumunda olduğu " "varsayılıyor." msgid "The --all option cannot be used with the --domain-name option" msgstr "--all seçeneği --domain-name seçeneğiyle kullanılamaz" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "Keystone yapılandırma dosyası %(config_file)s bulunamadı." 
#, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "Keystone alana özel yapılandırması birden fazla SQL sürücüsü belirtti " "(yalnızca birine izin verilir): %(source)s." msgid "The action you have requested has not been implemented." msgstr "İstediğiniz eylem uygulanmamış." msgid "The authenticated user should match the trustor." msgstr "Yetkilendirilen kullanıcı güven verenle eşleşmeli." msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." msgstr "" "İstediğiniz sertifikalar kullanılabilir değil. Bu sunucu muhtemelen PKI " "jetonlarını kullanmıyor ya da bu bir yanlış yapılandırmanın sonucu." #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "Parola uzunluğu %(size)i ye eşit ya da daha küçük olmalı. Sunucu talebe " "cevap veremedi çünkü parola geçersiz." msgid "The request you have made requires authentication." msgstr "Yaptığınız istek kimlik doğrulama gerektiriyor." msgid "The resource could not be found." msgstr "Kaynak bulunamadı." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "İptal etme çağrısı hem domain_id hem project_id'ye sahip olmamalı. Bu " "Keystone sunucudaki bir hata. Mevcut istek iptal edildi." msgid "The service you have requested is no longer available on this server." msgstr "İstediğiniz servis artık bu sunucu üzerinde kullanılabilir değil." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "Belirtilen üst bölge %(parent_region_id)s dairesel bölge sıralı dizisi " "oluştururdu." 
#, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "Yapılandırmada belirtilen %(group)s grubunun değeri seçenekler sözlüğü olmalı" msgid "There should not be any non-oauth parameters" msgstr "Herhangi bir non-oauth parametresi olmamalı" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Bu bilinen bir Fernet faydalı yük sürümü değil: %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "Zaman damgası beklenen biçimde değil. Sunucu talebi yerine getiremedi çünkü " "istek ya kusurlu ya da geçersiz. İstemcinin hata durumunda olduğu " "varsayılıyor." #, python-format msgid "" "To get a more detailed information on this error, re-run this command for " "the specific domain, i.e.: keystone-manage domain_config_upload --domain-" "name %s" msgstr "" "Bu hatayla ilgili daha detaylı bilgi almak için, bu komutu belirtilen alan " "için tekrar çalıştırın, örn.: keystone-manage domain_config_upload --domain-" "name %s" msgid "Token belongs to another user" msgstr "Jeton başka bir kullanıcıya ait" msgid "Token does not belong to specified tenant." msgstr "Jeton belirtilen kiracıya ait değil." msgid "Trustee has no delegated roles." msgstr "Yedieminin emanet edilen kuralları yok." msgid "Trustor is disabled." msgstr "Güven kurucu kapalı." 
#, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "%(group)s grubu güncellenmeye çalışılıyor, böylece yapılandırmada yalnızca " "grup belirtilmeli" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "%(group)s grubundaki %(option)s seçeneği güncellenmeye çalışılıyor, ama " "sağlanan yapılandırma %(option_other)s seçeneğini içeriyor" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "%(group)s grubundaki %(option)s seçeneği güncellenmeye çalışıldı, böylece, " "yapılandırmada yalnızca bu seçenek belirtilmeli" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "Keystone veri tabanına erişilemiyor, lütfen doğru yapılandırıldığından emin " "olun." #, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." msgstr "%(trust_id)s güveni tüketilemedi, kilit elde edilemiyor." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Bölge %(region_id)s silinemedi çünkü kendisi ya da alt bölgelerinin " "ilişkilendirilmiş bitiş noktaları var." 
#, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "Eşleştirme %(mapping_id)s kullanırken geçerli gruplar bulunamadı" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "Alan yapılandırma dizini bulunamıyor: %s" #, python-format msgid "Unable to lookup user %s" msgstr "%s kullanıcısı aranamadı" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Kimlik özniteliği %(attribute)s bağdaştırılamıyor çünkü çatışan değerleri " "var %(new)s ve %(old)s" #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed, or this is the result of misconfiguration. Reason " "%(reason)s" msgstr "" "SAML ifadesi imzalanamıyor. Muhtemelen bu sunucuda xmlsec1 kurulu değil, " "veya bu bir yanlış yapılandırmanın sonucu. Sebep %(reason)s" msgid "Unable to sign token." msgstr "Jeton imzalanamıyor." 
#, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Beklenmedik atama türüyle karşılaşıldı, %s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" "İzin özniteliklerinin beklenmedik katışımı - Kullanıcı: %(user_id)s, Grup: " "%(group_id)s, Proje: %(project_id)s, Alan: %(domain_id)s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "JSON Home yanıtı için beklenmedik durum istendi, %s" msgid "Unknown Target" msgstr "Bilinmeyen Hedef" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "--domain-name ile bilinmeyen alan '%(name)s' belirtilmiş" #, python-format msgid "Unknown token version %s" msgstr "Bilinmeyen jeton sürümü %s" #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "Kaydı silinmiş bağımlılık: %(targets)s için %(name)s" msgid "Update of `parent_id` is not allowed." msgstr "`parent_id` güncellemesine izin verilmiyor." 
msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "" "SAML iddiası oluşturma girişimi sırasında proje kapsamlı bir jeton kullan" #, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "%(u_id)s kullanıcısı %(t_id)s kiracısı için yetkilendirilmemiş" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "%(user_id)s kullanıcısının %(domain_id)s alanına erişimi yok" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "%(user_id)s kullanıcısının %(project_id)s projesine erişimi yok" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "Kullanıcı %(user_id)s zaten %(group_id)s grubu üyesi" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "Kullanıcı '%(user_id)s' '%(group_id)s' grubunda bulunamadı" msgid "User IDs do not match" msgstr "Kullanıcı ID leri uyuşmuyor" #, python-format msgid "User is disabled: %s" msgstr "Kullanıcı kapalı: %s" msgid "User is not a member of the requested project" msgstr "Kullanıcı istenen projenin üyesi değil" msgid "User is not a trustee." msgstr "Kullanıcı güvenilir değil." msgid "User not found" msgstr "Kullanıcı bulunamadı" msgid "User roles not supported: tenant_id required" msgstr "Kullanıcı rolleri desteklenmiyor: tenant_id gerekli" #, python-format msgid "User type %s not supported" msgstr "Kullanıcı türü %s desteklenmiyor" msgid "You are not authorized to perform the requested action." msgstr "İstenen eylemi gerçekleştirmek için yetkili değilsiniz." #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "İstenen eylemi gerçekleştirmek için yetkili değilsiniz: %(action)s" msgid "`key_mangler` functions must be callable." msgstr "`key_mangler` fonksiyonları çağrılabilir olmalı." 
msgid "`key_mangler` option must be a function reference" msgstr "`key_mangler` seçeneği fonksiyon referansı olmalı" msgid "any options" msgstr "herhangi bir seçenek" msgid "auth_type is not Negotiate" msgstr "auth_type Negotiate değil" msgid "authorizing user does not have role required" msgstr "yetkilendiren kullanıcı gerekli role sahip değil" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "kapalı bir proje içeren bir alt grupta proje oluşturulamaz: %s" #, python-format msgid "group %(group)s" msgstr "grup %(group)s" msgid "" "idp_contact_type must be one of: [technical, other, support, administrative " "or billing." msgstr "" "idp_contact_type şunlardan biri olmalı: [teknik, diğer, destek, idari veya " "faturalama." #, python-format msgid "invalid date format %s" msgstr "geçersiz tarih biçimi %s" #, python-format msgid "option %(option)s in group %(group)s" msgstr "%(group)s grubundaki %(option)s seçeneği" msgid "provided consumer key does not match stored consumer key" msgstr "sağlanan tüketici anahtarı depolanan tüketici anahtarıyla eşleşmiyor" msgid "provided request key does not match stored request key" msgstr "sağlanan istek anahtarı depolanan istek anahtarıyla eşleşmiyor" msgid "provided verifier does not match stored verifier" msgstr "sağlanan doğrulayıcı depolanan doğrulayıcı ile eşleşmiyor" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses pozitif bir değer ya da null olmalı." 
msgid "remaining_uses must not be set if redelegation is allowed" msgstr "tekrar yetkilendirmeye izin veriliyorsa remaining_uses ayarlanmamalı" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "%(group)s grubunu güncelleme isteği, ama sağlanan yapılandırma " "%(group_other)s grubunu içeriyor" msgid "rescope a scoped token" msgstr "kapsamlı bir jeton tekrar kapsamlandı" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s bulunamadı ya da bir dizin" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s bulunamadı ya da bir dosya değil" #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "jeton referansı bir KeystoneToken türünde olmalı, alınan: %s" keystone-9.0.0/keystone/locale/zh_CN/0000775000567000056710000000000012701407246020626 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/zh_CN/LC_MESSAGES/0000775000567000056710000000000012701407246022413 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po0000664000567000056710000001152412701407102026514 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Xiao Xi LIU , 2014 # 刘俊朋 , 2015 # OpenStack Infra , 2015. #zanata # Andreas Jaeger , 2016. #zanata # Gaoxiao Zhu , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev1\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-16 22:54+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-15 10:40+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (China)\n" msgid "Cannot retrieve Authorization headers" msgstr "无法获取认证头信息" #, python-format msgid "" "Circular reference or a repeated entry found in projects hierarchy - " "%(project_id)s." msgstr "在项目树-%(project_id)s 中发现循环引用或重复项。" #, python-format msgid "" "Circular reference or a repeated entry found in region tree - %(region_id)s." msgstr "在域树- %(region_id)s 中发现循环引用或重复项。" #, python-format msgid "" "Circular reference or a repeated entry found projects hierarchy - " "%(project_id)s." msgstr "在项目树-%(project_id)s 中发现循环引用或重复项。" #, python-format msgid "Could not bind to %(host)s:%(port)s" msgstr "无法绑定至 %(host)s:%(port)s" #, python-format msgid "" "Either [fernet_tokens] key_repository does not exist or Keystone does not " "have sufficient permission to access it: %s" msgstr "[fernet_tokens] 键仓库不存在或者ketystone没有足够的权限去访问它: %s。" msgid "" "Error setting up the debug environment. Verify that the option --debug-url " "has the format : and that a debugger processes is listening on " "that port." 
msgstr "" "设置调试环境出错。请确保选项--debug-url 的格式是这样的: ,和确保" "有一个调试进程正在监听那个端口" #, python-format msgid "Error when signing assertion, reason: %(reason)s%(output)s" msgstr "对断言进行签名时出错,原因:%(reason)s%(output)s" msgid "Failed to construct notifier" msgstr "构造通知器失败" msgid "" "Failed to create [fernet_tokens] key_repository: either it already exists or " "you don't have sufficient permissions to create it" msgstr "创建[Fernet_tokens] 键仓库失败:它已存在或你没有足够的权限去创建它。" msgid "Failed to create the default domain." msgstr "无法创建默认域。" #, python-format msgid "Failed to remove file %(file_path)r: %(error)s" msgstr "无法删除文件%(file_path)r: %(error)s" #, python-format msgid "Failed to send %(action)s %(event_type)s notification" msgstr "发送 %(action)s %(event_type)s 通知失败" #, python-format msgid "Failed to send %(res_id)s %(event_type)s notification" msgstr "发送%(res_id)s %(event_type)s 通知失败" msgid "Failed to validate token" msgstr "token验证失败" #, python-format msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s" msgstr "端点 %(url)s 的格式不正确 - 键 %(keyerror)s 未知" #, python-format msgid "" "Malformed endpoint %s - incomplete format (are you missing a type notifier ?)" msgstr "端点 %s 的格式不完整 - (是否缺少了类型通告者?)" #, python-format msgid "" "Malformed endpoint '%(url)s'. The following type error occurred during " "string substitution: %(typeerror)s" msgstr "" "端点 '%(url)s' 的格式不正确。在字符串替换时发生以下类型错误:%(typeerror)s" #, python-format msgid "Malformed endpoint - %(url)r is not a string" msgstr "端点 - %(url)r 不是一个字符串" #, python-format msgid "" "Reinitializing revocation list due to error in loading revocation list from " "backend. Expected `list` type got `%(type)s`. 
Old revocation list data: " "%(list)r" msgstr "" "由于从后端加载撤销列表出现错误,重新初始化撤销列表。期望“列表”类型是 `" "%(type)s`。旧的撤销列表数据是: %(list)r" msgid "Server error" msgstr "服务器报错" msgid "Unable to sign token" msgstr "无法签名令牌" #, python-format msgid "Unexpected error or malformed token determining token expiry: %s" msgstr "决策令牌预计超期时间 :%s 时,出现未知错误或变形的令牌" #, python-format msgid "" "Unexpected results in response for domain config - %(count)s responses, " "first option is %(option)s, expected option %(expected)s" msgstr "" "针对域配置- %(count)s 结果,响应中出现不是预期结果,第一参数是%(option)s,期" "望参数是 %(expected)s 。" keystone-9.0.0/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000153412701407102027155 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (China)\n" #, python-format msgid "Unable to open template file %s" msgstr "无法打开模板文件 %s" keystone-9.0.0/keystone/locale/zh_CN/LC_MESSAGES/keystone.po0000664000567000056710000014275312701407105024622 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Zhong Chaoliang , 2013 # Dongliang Yu , 2013 # Lee Yao , 2013 # Lee Yao , 2013 # Zhong Chaoliang , 2013 # 颜海峰 , 2014 # Lucas Palm , 2015. 
#zanata # OpenStack Infra , 2015. #zanata # Linda , 2016. #zanata # Lucas Palm , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev6\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-22 15:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-22 10:25+0000\n" "Last-Translator: Linda \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (China)\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "%(driver)s is not supported driver version" msgstr "%(driver)s 并非受支持驱动程序版本" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "%(entity)s 名称不能包含以下保留字符:%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "%(event)s 不是有效通知事件,必须是下列其中一项:%(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s 不是可信的仪表板主机" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s 未提供数据库迁移。%(path)s 处的迁移存储库路径不存在或者不是目" "录。" #, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s" msgstr "%(prior_role_id)s 并未暗示 %(implied_role_id)s" #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." msgstr "%(property_name)s 不能少于 %(min_length)s 个字符。" #, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "%(property_name)s 不在 %(display_expected_type)s 之中" #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." 
msgstr "%(property_name)s 不应该超过 %(max_length)s 个字符。" #, python-format msgid "%(role_id)s cannot be an implied roles" msgstr "%(role_id)s 不能是暗示角色" #, python-format msgid "%s cannot be empty." msgstr "%s 不能为空。" #, python-format msgid "%s extension does not exist." msgstr "%s 扩展不存在。" #, python-format msgid "%s field is required and cannot be empty" msgstr "%s 字段是必填字段,不能为空" #, python-format msgid "%s field(s) cannot be empty" msgstr "%s 字段不能为空" #, python-format msgid "" "%s for the LDAP identity backend has been deprecated in the Mitaka release " "in favor of read-only identity LDAP access. It will be removed in the \"O\" " "release." msgstr "" "在 Mitaka 发行版中,已不推荐使用 LDAP 身份后端的 %s (以支持只读身份 LDAP 访" "问)。它将在“O”发行版中移除。" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(禁用 insecure_debug 方式以避免显示这些详细信息。)" msgid "--all option cannot be mixed with other options" msgstr "--all 选项不能与其他选项一起使用" msgid "A project-scoped token is required to produce a service catalog." msgstr "产生服务目录时需要项目范围的令牌。" msgid "Access token is expired" msgstr "访问令牌已过期" msgid "Access token not found" msgstr "找不到访问令牌" msgid "Additional authentications steps required." msgstr "需要额外身份验证" msgid "An unexpected error occurred when retrieving domain configs" msgstr "检索域配置时发生意外错误" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "尝试存储 %s 时发生意外错误" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "意外错误阻止了服务器完成您的请求。" #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "意外错误导致服务器无法完成您的请求:%(exception)s" msgid "An unhandled exception has occurred: Could not find metadata." msgstr "存在无法处理的异常:找不到元数据。" msgid "At least one option must be provided" msgstr "必须至少提供一个选项" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "必须至少提供一个选项,请使用 --all 或 --domain-name" msgid "At least one role should be specified." 
msgstr "应该至少指定一个角色。" #, python-format msgid "" "Attempted automatic driver selection for assignment based upon " "[identity]\\driver option failed since driver %s is not found. Set " "[assignment]/driver to a valid driver in keystone config." msgstr "" "尝试根据 [identity]\\driver 选项为分配自动选择驱动程序失败,因为找不到驱动程" "序 %s。请在 keystone 配置中将 [assignment]/driver 设置为有效驱动程序。" msgid "Attempted to authenticate with an unsupported method." msgstr "尝试使用未支持的方法进行验证" msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "正在尝试将 OS-FEDERATION 令牌与 V2 身份服务配合使用,请使用 V3 认证" msgid "Authentication plugin error." msgstr "认证插件错误" #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "后端“%(backend)s”不是有效的 memcached 后端。有效后端:%(backend_list)s" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "无法对带有通过代理发出的令牌的请求令牌授权。" #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "无法更改 %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "无法更改域标识" msgid "Cannot change user ID" msgstr "无法更改用户标识" msgid "Cannot change user name" msgstr "无法更改用户名" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "无法创建具有无效 URL %(url)s 的端点" #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "无法创建具有父代的项目:%(project_id)s" #, python-format msgid "" "Cannot create project, since it specifies its owner as domain %(domain_id)s, " "but specifies a parent in a different domain (%(parent_domain_id)s)." msgstr "" "无法创建项目,因为它将其所有者指定为域 %(domain_id)s,但指定的项目在另一个域 " "(%(parent_domain_id)s) 中。" #, python-format msgid "" "Cannot create project, since its parent (%(domain_id)s) is acting as a " "domain, but project's specified parent_id (%(parent_id)s) does not match " "this domain_id." 
msgstr "" "无法创建项目,因为其父代 (%(domain_id)s) 正充当域,但该项目的指定 parent_id " "(%(parent_id)s) 与此 domain_id 不匹配。" msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "无法删除已启用的域,请先禁用该域。" #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "无法删除项目 %(project_id)s,因为其子树包含已启用的项目。" #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "无法删除项目 %s,因为它不是该层次结构中的支叶。如果要删除整个子树,请使用级联" "选项。" #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "无法禁用项目 %(project_id)s,因为它的子树包含已启用的项目。" #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "无法启用项目 %s,因为它具有已禁用的父代" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "无法列示源自若干组并按用户标识过滤的分配。" msgid "Cannot list request tokens with a token issued via delegation." msgstr "无法列示带有通过代理发出的令牌的请求令牌。" #, python-format msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s" msgstr "无法打开证书 %(cert_file)s。原因:%(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "无法除去尚未授予的角色 %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "在没有将 hints list 用作 self 后面的第一个参数的情况下,无法截断驱动程序调用" msgid "Cannot update domain_id of a project that has children." msgstr "无法更新具有子代的项目的 domain_id。" msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "无法同时使用 parents_as_list 和 parents_as_ids 查询参数。" msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "无法同时使用 subtree_as_list 和 subtree_as_ids 查询参数。" msgid "Cascade update is only allowed for enabled attribute." msgstr "只允许对已启用的属性执行级联更新。" msgid "" "Combining effective and group filter will always result in an empty list." 
msgstr "将有效过滤器与组过滤器进行组合将始终产生空列表。" msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "将有效过滤器、域过滤器和继承的过滤器进行组合将始终产生空列表。" #, python-format msgid "Config API entity at /domains/%s/config" msgstr "在 /domains/%s/config 配置 API 实体" #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "尝试存储 %(type)s 时发生冲突 - %(details)s" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "指定的区域标识有冲突:“%(url_id)s”不等于“%(ref_id)s”" msgid "Consumer not found" msgstr "找不到使用者" #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target %(target)s" msgstr "未能更改目标 %(target)s 中的不可变属性 %(attributes)s " #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "未能确定身份提供者标识。在请求环境中找不到配置选项 %(issuer_attribute)s。" #, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "在以下域的域配置中找不到 %(group_or_option)s:%(domain_id)s" #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "找不到端点组:%(endpoint_group_id)s" msgid "Could not find Identity Provider identifier in environment" msgstr "在环境中,找不到“身份提供者”标识" #, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "找不到身份提供者:%(idp_id)s" #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "找不到服务提供程序:%(sp_id)s" #, python-format msgid "Could not find credential: %(credential_id)s" msgstr "找不到凭证:%(credential_id)s" #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "找不到域:%(domain_id)s" #, python-format msgid "Could not find endpoint: %(endpoint_id)s" msgstr "找不到端点:%(endpoint_id)s" #, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "找不到身份提供者 %(idp_id)s 的联合协议 
%(protocol_id)s " #, python-format msgid "Could not find group: %(group_id)s" msgstr "找不到组:%(group_id)s" #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "找不到映射:%(mapping_id)s" msgid "Could not find policy association" msgstr "找不到策略关联" #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "找不到策略:%(policy_id)s" #, python-format msgid "Could not find project: %(project_id)s" msgstr "找不到项目:%(project_id)s" #, python-format msgid "Could not find region: %(region_id)s" msgstr "找不到区域:%(region_id)s" #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" "找不到角色分配,角色为 %(role_id)s,用户或组为 %(actor_id)s,项目或域为 " "%(target_id)s" #, python-format msgid "Could not find role: %(role_id)s" msgstr "找不到角色:%(role_id)s" #, python-format msgid "Could not find service: %(service_id)s" msgstr "找不到服务:%(service_id)s" #, python-format msgid "Could not find token: %(token_id)s" msgstr "找不到令牌:%(token_id)s" #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "找不到信任:%(trust_id)s" #, python-format msgid "Could not find user: %(user_id)s" msgstr "找不到用户:%(user_id)s" #, python-format msgid "Could not find version: %(version)s" msgstr "找不到版本:%(version)s" #, python-format msgid "Could not find: %(target)s" msgstr "找不到 %(target)s" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "无法将任何联合用户属性映射至身份值。请检查调试日志或所使用的映射以获取其他信" "息。" msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." 
msgstr "" "设置临时用户身份时未能映射用户。映射规则必须指定用户标识/用户名,或者必须设" "置 REMOTE_USER 环境变量。" msgid "Could not validate the access token" msgstr "未能验证访问令牌" msgid "Credential belongs to another user" msgstr "凭证属于另一用户" msgid "Credential signature mismatch" msgstr "凭据签名不匹配" #, python-format msgid "" "Direct import of auth plugin %(name)r is deprecated as of Liberty in favor " "of its entrypoint from %(namespace)r and may be removed in N." msgstr "" "自 Liberty 开始,已不推荐直接导入认证插件 %(name)r(为了支持它在 " "%(namespace)r 中的入口点),并且可能在 N 中移除。" #, python-format msgid "" "Direct import of driver %(name)r is deprecated as of Liberty in favor of its " "entrypoint from %(namespace)r and may be removed in N." msgstr "" "自 Liberty 开始,已不推荐直接导入驱动程序 %(name)r(为了支持它在 " "%(namespace)r 中的入口点),并且可能在 N 中移除。" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "正在禁用实体,在此情况下,“enable”属性已由配置忽略。" #, python-format msgid "Domain (%s)" msgstr "域 (%s)" #, python-format msgid "Domain cannot be named %s" msgstr "无法将域命名为 %s" #, python-format msgid "Domain cannot have ID %s" msgstr "域不能具有标识 %s" #, python-format msgid "Domain is disabled: %s" msgstr "域已禁用:%s" msgid "Domain name cannot contain reserved characters." msgstr "域名不能包含保留字符。" msgid "Domain scoped token is not supported" msgstr "作用域限定到域的令牌不受支持" msgid "Domain specific roles are not supported in the V8 role driver" msgstr "V8 角色驱动程序中不支持特定于域的角色" #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "域 %(domain)s 已定义配置 - 正在忽略以下文件:%(file)s。" msgid "Duplicate Entry" msgstr "重复条目" #, python-format msgid "Duplicate ID, %s." msgstr "标识 %s 重复。" #, python-format msgid "Duplicate entry: %s" msgstr "重复条目:%s" #, python-format msgid "Duplicate name, %s." msgstr "名称 %s 重复。" #, python-format msgid "Duplicate remote ID: %s" msgstr "重复远程标识:%s" msgid "EC2 access key not found." msgstr "找不到 EC2 访问密钥。" msgid "EC2 signature not supplied." 
msgstr "未提供 EC2 签名。" msgid "" "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set." msgstr "必须设置 --bootstrap-password 自变量或 OS_BOOTSTRAP_PASSWORD。" msgid "Enabled field must be a boolean" msgstr "已启用的字段必须为布尔值" msgid "Enabled field should be a boolean" msgstr "已启用的字段应该为布尔值" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "在项目 %(project_id)s 中找不到端点 %(endpoint_id)s" msgid "Endpoint Group Project Association not found" msgstr "找不到端点组项目关联" msgid "Ensure configuration option idp_entity_id is set." msgstr "请确保设置了配置选项 idp_entity_id。" msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "请确保设置了配置选项 idp_sso_endpoint。" #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "解析域 %(domain)s 的配置文件时出错,文件为 %(file)s。" #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "打开文件 %(path)s 时出错:%(err)s" #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "解析行“%(line)s”时出错:%(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "解析规则 %(path)s 时出错:%(err)s" #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "读取元数据文件时出错,原因为 %(reason)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "注册域 %(domain)s 以使用 SQL 驱动程序的尝试次数已超出限制,显示为进行此尝试的" "最后一个域为 %(last_domain)s,正在放弃" #, python-format msgid "Expected dict or list: %s" msgstr "期望字典或者列表: %s" msgid "" "Expected signing certificates are not available on the server. Please check " "Keystone configuration." msgstr "在服务器上,期望的签名证书不可用。请检查 Keystone 配置。" #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." 
msgstr "" "期望在 %(target)s 中找到 %(attribute)s - 服务器未能遵照请求,因为它的格式或者" "其他方面不正确。客户机被认为发生错误。" #, python-format msgid "Failed to start the %(name)s server" msgstr "未能启动 %(name)s 服务器" msgid "Failed to validate token" msgstr "token验证失败" msgid "Federation token is expired" msgstr "联合令牌已到期" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "字段“remaining_uses”已设置为 %(value)s,尽管为了重新委派信任,不能设置该字段" msgid "Found invalid token: scoped to both project and domain." msgstr "发现无效令牌:范围同时为项目和域。" #, python-format msgid "Group %s not found in config" msgstr "在配置中找不到组 %s。" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "特定于域的配置不支持组 %(group)s" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "在后端中,找不到由映射 %(mapping_id)s 返回的组 %(group_id)s。" #, python-format msgid "" "Group membership across backend boundaries is not allowed, group in question " "is %(group_id)s, user is %(user_id)s" msgstr "" "不允许使用跨后端边界的组成员资格,所提到的组为%(group_id)s,用户为 " "%(user_id)s" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "在 LDAP 对象 %(dn)s 中,找不到标识属性 %(id_attr)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "身份提供者 %(idp)s 已禁用" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "新的“身份提供者”标识未包含在已接受的标识中。" msgid "Invalid EC2 signature." msgstr "无效 EC2 签名。" #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "LDAP TLS 证书选项 %(option)s 无效。请选择下列其中一项:%(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "无效的LDAP TLS_AVAIL 选项: %s.TLS无效" #, python-format msgid "Invalid LDAP deref option: %(option)s. 
Choose one of: %(options)s" msgstr "LDAP deref 选项 %(option)s 无效。请选择下列其中一项:%(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "无效的 LDAP作用域: %(scope)s. 选择以下选项之一: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "无效的 TLS / LDAPS 组合" #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "无效审计信息数据类型:%(data)s (%(type)s)" msgid "Invalid blob in credential" msgstr "凭证中的 BLOB 无效" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "在配置文件名 %(file)s 中找到的域名 %(domain)s 无效 - 正在忽略此文件。" #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "特定于域的配置无效:%(reason)s" #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "对字段“%(path)s”的输入无效。值为“%(value)s”。" msgid "Invalid limit value" msgstr "限制值无效" #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service or " "Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s" msgstr "" "用于策略关联的实体混合无效 - 仅允许“端点”、“服务”或“区域 + 服务”。请求为 - 端" "点:%(endpoint_id)s,服务:%(service_id)s,区域:%(region_id)s" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "规则 %(identity_value)s 无效。必须同时指定关键字“groups”和“domain”。" msgid "Invalid signature" msgstr "签名无效" msgid "Invalid user / password" msgstr "用户/密码无效" msgid "Invalid username or TOTP passcode" msgstr "无效用户名或 TOTP 密码" msgid "Invalid username or password" msgstr "无效用户名或密码" #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." 
msgstr "KVS 区域 %s 已配置。无法重新配置。" #, python-format msgid "Key Value Store not configured: %s" msgstr "未配置键值存储:%s" #, python-format msgid "LDAP %s create" msgstr "LDAP %s 创建" #, python-format msgid "LDAP %s delete" msgstr "LDAP %s 删除" #, python-format msgid "LDAP %s update" msgstr "LDAP %s 更新" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "可变换资源标识的长度超过 64 个字符(允许的最大字符数)。" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "映射 %(mapping_id)s 中的本地节引用不存在的远程匹配(例如,本地节中的 " "'{0}')。" #, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "对于键 %(target)s,发生锁定超时" #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "锁定键必须与目标键匹配:%(lock)s != %(target)s" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "不正确的端点URL(%(endpoint)s), 查看错误日志获取详情" msgid "Marker could not be found" msgstr "找不到标记符" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "已达到 %s 分支的最大层深度。" #, python-format msgid "Maximum lock attempts on %s occurred." msgstr "已达到对 %s 的最大锁定尝试次数。" #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "成员 %(member)s 已属于组 %(group)s" #, python-format msgid "Method not callable: %s" msgstr "方法不可调用:%s" msgid "Missing entity ID from environment" msgstr "环境中缺少实体标识" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "正在修改“redelegation_count”(当禁止重新委派时)。建议省略此参数。" msgid "Multiple domains are not supported" msgstr "多个域不受支持" msgid "Must be called within an active lock context." 
msgstr "必须在处于活动状态的锁定上下文内调用。" msgid "Must specify either domain or project" msgstr "必须指定 domain 或 project" msgid "Name field is required and cannot be empty" msgstr "名称字段是必填字段,不能为空" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "既未提供项目域标识,也未提供项目域名。" msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, if " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "找不到任何授权头,无法继续进行与 OAuth 相关的调用,如果是通过 HTTP 或 Apache " "运行,请确保 WSGIPassAuthorization 设置为开启。" msgid "No authenticated user" msgstr "不存在任何已认证的用户" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "找不到任何加密密钥;请针对引导程序 1 运行 keystone-manage fernet_setup。" msgid "No options specified" msgstr "无选项指定" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "没有任何策略与端点 %(endpoint_id)s 关联。" #, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "对于信任 %(trust_id)s,不存在其余使用" msgid "No token in the request" msgstr "请求中没有令牌。" msgid "Non-default domain is not supported" msgstr "非缺省域不受支持" msgid "One of the trust agents is disabled or deleted" msgstr "其中一个信任代理已禁用或删除" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "在检查域配置请求时,找到选项 %(option)s,但未指定任何组" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "特定于域的配置不支持组 %(group)s 中的选项 %(option)s" #, python-format msgid "Project (%s)" msgstr "项目 (%s)" #, python-format msgid "Project ID not found: %(t_id)s" msgstr "找不到项目标识:%(t_id)s" msgid "Project field is required and cannot be empty." msgstr "项目字段是必填字段,不得为空。" #, python-format msgid "Project is disabled: %s" msgstr "项目已禁用:%s" msgid "Project name cannot contain reserved characters." 
msgstr "项目名称不能包含保留字符。" msgid "Query string is not UTF-8 encoded" msgstr "查询字符串不是采用 UTF-8 编码" #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "系统不支持读取组 %(group)s 中的选项 %(option)s 的缺省值。" msgid "Redelegation allowed for delegated by trust only" msgstr "仅允许对“委派者”信任进行重新委派" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "其余重新委派深度 %(redelegation_depth)d 超出允许的范围 [0..%(max_count)d]" msgid "" "Remove admin_crud_extension from the paste pipeline, the admin_crud " "extension is now always available. Updatethe [pipeline:admin_api] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "从粘贴管道移除 admin_crud_extension,admin_crud 扩展现在始终可用。对 " "keystone-paste.ini 中的 [pipeline:admin_api] 节进行相应更新,因为它将会在 O " "发行版中移除。" msgid "" "Remove endpoint_filter_extension from the paste pipeline, the endpoint " "filter extension is now always available. Update the [pipeline:api_v3] " "section in keystone-paste.ini accordingly as it will be removed in the O " "release." msgstr "" "从粘贴管道移除 endpoint_filter_extension,端点过滤器扩展现在始终可用。对 " "keystone-paste.ini 中的 [pipeline:api_v3] 节进行相应更新,因为它将会在 O 发" "行版中移除。" msgid "" "Remove federation_extension from the paste pipeline, the federation " "extension is now always available. Update the [pipeline:api_v3] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "从粘贴管道移除 federation_extension,联合扩展现在始终可用。对 keystone-paste." "ini 中的 [pipeline:api_v3] 节进行相应更新,因为它将会在 O 发行版中移除。" msgid "" "Remove oauth1_extension from the paste pipeline, the oauth1 extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "从粘贴管道移除 oauth1_extension,oauth1 扩展现在始终可用。对 keystone-paste." 
"ini 中的 [pipeline:api_v3] 节进行相应更新,因为它将会在 O 发行版中移除。" msgid "" "Remove revoke_extension from the paste pipeline, the revoke extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "从粘贴管道移除 revoke_extension,撤销扩展现在始终可用。对 keystone-paste.ini " "中的 [pipeline:api_v3] 节进行相应更新,因为它将会在 O 发行版中移除。" msgid "" "Remove simple_cert from the paste pipeline, the PKI and PKIz token providers " "are now deprecated and simple_cert was only used insupport of these token " "providers. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "从粘贴管道移除 simple_cert,现在已不推荐使用 PKI 和 PKIz 令牌,simple_cert 仅" "用于支持这些令牌提供程序。对 keystone-paste.ini 中的 [pipeline:api_v3] 节进行" "相应更新,因为它将会在 O 发行版中移除。" msgid "" "Remove user_crud_extension from the paste pipeline, the user_crud extension " "is now always available. Updatethe [pipeline:public_api] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." msgstr "" "从粘贴管道移除 user_crud_extension,user_crud 扩展现在始终可用。对 keystone-" "paste.ini 中的 [pipeline:admin_api] 节进行相应更新,因为它将会在 O 发行版中移" "除。" msgid "Request Token does not have an authorizing user id" msgstr "请求令牌没有授权用户标识" #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. The " "server could not comply with the request because the attribute size is " "invalid (too large). The client is assumed to be in error." 
msgstr "" "请求属性 %(attribute)s 必须小于或等于 %(size)i。服务器未能遵照请求,因为属性" "大小无效(太大)。客户机被认为发生错误。" msgid "Request must have an origin query parameter" msgstr "请求必须具有源查询参数" msgid "Request token is expired" msgstr "请求令牌已过期" msgid "Request token not found" msgstr "找不到请求令牌" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "请求的到期时间超过重新委派的信任可提供的到期时间" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "请求的重新委派深度 %(requested_count)d 超过允许的 %(max_count)d" msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of running " "in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will " "be removed in the \"M\"-Release." msgstr "" "自 Kilo 开始,建议不要通过 eventlet 运行 keystone,改为在 WSGI 服务器(例如 " "mod_wsgi)中运行。在“M”发行版中,将移除对在 eventlet 下运行 keystone 的支持。" msgid "Scoping to both domain and project is not allowed" msgstr "不允许同时将作用域限定到域和项目" msgid "Scoping to both domain and trust is not allowed" msgstr "不允许同时将作用域限定到域和信任" msgid "Scoping to both project and trust is not allowed" msgstr "不允许同时将作用域限定到项目和信任" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "服务提供程序 %(sp)s 已禁用" msgid "Some of requested roles are not in redelegated trust" msgstr "某些所请求角色未在重新委派的信任中" msgid "Specify a domain or project, not both" msgstr "请指定域或项目,但不是同时指定这两者" msgid "Specify a user or group, not both" msgstr "请指定用户或组,但不是同时指定这两者" msgid "Specify one of domain or project" msgstr "请指定域或项目" msgid "Specify one of user or group" msgstr "请指定用户或组" #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "字符串长度过长.字符串'%(string)s' 的长度超过列限制 %(type)s(字符" "(%(length)d))." msgid "Tenant name cannot contain reserved characters." 
msgstr "租户名称不能包含保留字符。" #, python-format msgid "" "The %s extension has been moved into keystone core and as such its " "migrations are maintained by the main keystone database control. Use the " "command: keystone-manage db_sync" msgstr "" "%s 扩展已移至 keystone 核心,因此,其迁移由主 keystone 数据库控件维护。使用以" "下命令:keystone-manage db_sync" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "“expires_at”不得早于现在。服务器未能遵从请求,因为它的格式不正确,或者其他方" "面不正确。客户机被认为发生错误。" msgid "The --all option cannot be used with the --domain-name option" msgstr "--all 选项不能与 --domain-name 选项配合使用" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "找不到 Keystone 配置文件 %(config_file)s。" #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "特定于 Keystone 域的配置已指定多个 SQL 驱动程序(仅允许指定一个):" "%(source)s。" msgid "The action you have requested has not been implemented." msgstr "您请求的操作暂未被执行" msgid "The authenticated user should match the trustor." msgstr "认证用户应匹配信任者。" msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." msgstr "" "已请求的证书不可用。可能此服务器未使用 PKI 令牌,或者这是因为配置错误。" msgid "The configured token provider does not support bind authentication." msgstr "所配置的令牌提供者不支持绑定认证。" msgid "The creation of projects acting as domains is not allowed in v2." msgstr "不允许在 v2 中创建充当域的项目。" #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "密码长度必须小于或等于 %(size)i。服务器未能遵照请求,因为密码无效。" msgid "The request you have made requires authentication." msgstr "你的请求需要先授权" msgid "The resource could not be found." 
msgstr "找不到该资源。" msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "撤销调用不能同时具有 domain_id 和 project_id。这是 Keystone 服务器中的错误。" "当前请求已异常中止。" msgid "The service you have requested is no longer available on this server." msgstr "在此服务器上,已请求的服务不再可用。" #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "指定的父区域 %(parent_region_id)s 将创建循环区域层次结构。" #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "在配置中指定的组 %(group)s 的值应该是选项的字典" msgid "There should not be any non-oauth parameters" msgstr "不应该存在任何非 oauth 参数" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "这不是可识别的 Fernet 有效内容版本:%s" #, python-format msgid "This is not a recognized Fernet token %s" msgstr "这不是可识别的 Fernet 令牌 %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "时间戳记未采用所需格式。服务器未能遵照请求,因为它的格式或者其他方面不正确。" "客户机被认为发生错误。" #, python-format msgid "" "To get a more detailed information on this error, re-run this command for " "the specific domain, i.e.: keystone-manage domain_config_upload --domain-" "name %s" msgstr "" "要获取有关此错误的更详细信息,请针对特定域重新运行此命令,即:keystone-" "manage domain_config_upload --domain-name %s" msgid "Token belongs to another user" msgstr "令牌属于另一用户" msgid "Token does not belong to specified tenant." msgstr "令牌不属于指定的租户。" msgid "Token version is unrecognizable or unsupported." msgstr "令牌版本不可识别或者不受支持。" msgid "Trustee has no delegated roles." msgstr "托管人没有任何已委派的角色。" msgid "Trustor is disabled." 
msgstr "Trustor被禁用" #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "正在尝试更新组 %(group)s,因此仅存在以下要求:必须在配置中指定组" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "正在尝试更新组 %(group)s 中的选项 %(option)s,但所提供配置反而包含选项 " "%(option_other)s" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "正在尝试更新组 %(group)s 中的选项 %(option)s,因此仅存在以下要求:必须在配置" "中指定选项" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "无法访问 keystone 数据库,请检查它是否正确配置。" #, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." msgstr "无法使用信任 %(trust_id)s,无法获取锁定。" #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "无法删除区域 %(region_id)s,因为它或它的子区域具有关联的端点。" msgid "Unable to downgrade schema" msgstr "无法对模式进行降级" #, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "使用映射 %(mapping_id)s 时,找不到有效组" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "找不到指定的域配置目录:%s" #, python-format msgid "Unable to lookup user %s" msgstr "无法查找用户 %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "无法协调身份属性 %(attribute)s,因为它具有冲突值%(new)s 和 %(old)s" #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed, or this is the result of misconfiguration. Reason " "%(reason)s" msgstr "" "无法对 SAML 断言进行签名。此服务器可能未安装 xmlsec1,或者这可能是由于配置错" "误导致的。原因 %(reason)s" msgid "Unable to sign token." 
msgstr "无法对令牌进行签名。" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "遇到意外的指派类型 %s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" "存在以下 grant 属性的意外组合 - 用户 %(user_id)s、组 %(group_id)s、项目 " "%(project_id)s 和域 %(domain_id)s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "请求 JSON 主页响应时处于意外状态,%s" msgid "Unknown Target" msgstr "目标未知" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "--domain-name 指定的“%(name)s”是未知域" #, python-format msgid "Unknown token version %s" msgstr "令牌版本 %s 未知" #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "已针对 %(targets)s 注销依赖关系 %(name)s" msgid "Update of `domain_id` is not allowed." msgstr "不允许更新“domain_id”。" msgid "Update of `is_domain` is not allowed." msgstr "不允许更新“is_domain”。" msgid "Update of `parent_id` is not allowed." msgstr "不允许更新“parent_id”。" msgid "Update of domain_id is only allowed for root projects." msgstr "只允许更新根项目的 domain_id。" msgid "Update of domain_id of projects acting as domains is not allowed." msgstr "不允许更新充当域的项目的 domain_id。" msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "当尝试创建 SAML 断言时,请使用项目范围的令牌" msgid "" "Use of the identity driver config to automatically configure the same " "assignment driver has been deprecated, in the \"O\" release, the assignment " "driver will need to be expicitly configured if different than the default " "(SQL)." 
msgstr "" "已不推荐使用标识驱动程序配置来自动配置同一分配驱动程序,在“O”发行版中,如果不" "同于缺省值 (SQL),那么需要显式配置分配驱动程序。" #, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "没有授权给用户%(u_id)s项目%(t_id)s的权限" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "用户%(user_id)s对域%(domain_id)s没有任何访问权限" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "用户%(user_id)s 没有访问项目 %(project_id)s的权限" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "用户%(user_id)s 已是组 %(group_id)s 的成员" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "在组“%(group_id)s”中找不到用户“%(user_id)s”" msgid "User IDs do not match" msgstr "用户ID不匹配" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "由于缺少用户标识、具有域标识的用户名或者具有域名的用户名,因此无法构建用户认" "证。" #, python-format msgid "User is disabled: %s" msgstr "用户已禁用:%s" msgid "User is not a member of the requested project" msgstr "用户不是所请求项目的成员" msgid "User is not a trustee." msgstr "用户不是受托人。" msgid "User not found" msgstr "找不到用户" msgid "User not valid for tenant." msgstr "用户做为租户是无效的。" msgid "User roles not supported: tenant_id required" msgstr "用户角色不受支持:需要 tenant_id" #, python-format msgid "User type %s not supported" msgstr "用户类型 %s 不受支持" msgid "You are not authorized to perform the requested action." msgstr "您没有授权完成所请求的操作。" #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "您无权执行请求的操作:%(action)s" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "您已尝试使用管理员令牌创建资源。因为此令牌不在域中,所以您必须显式添加域以使" "此资源成为其成员。" msgid "`key_mangler` functions must be callable." 
msgstr "“key_mangler”函数必须可调用。" msgid "`key_mangler` option must be a function reference" msgstr "“key_mangler”选项必须为函数引用" msgid "any options" msgstr "任何选项" msgid "auth_type is not Negotiate" msgstr "auth_type 不是“Negotiate”" msgid "authorizing user does not have role required" msgstr "授权用户没有必需的角色" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "无法在包含已禁用项目的分支中创建项目:%s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "无法删除充当域的已启用项目。请先禁用项目 %s。" #, python-format msgid "group %(group)s" msgstr "组 %(group)s" msgid "" "idp_contact_type must be one of: [technical, other, support, administrative " "or billing." msgstr "" "idp_contact_type 必须是下列其中一项:technical、other、support、" "administrative 或 billing。" #, python-format msgid "invalid date format %s" msgstr "日期格式 %s 无效" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "不允许两个同名项目充当域:%s" #, python-format msgid "" "it is not permitted to have two projects within a domain with the same " "name : %s" msgstr "不允许一个域的两个项目具有相同名称:%s" msgid "only root projects are allowed to act as domains." msgstr "只允许根项目充当域。" #, python-format msgid "option %(option)s in group %(group)s" msgstr "组 %(group)s 中的选项 %(option)s" msgid "provided consumer key does not match stored consumer key" msgstr "提供的用户密钥与存储的用户密钥不符" msgid "provided request key does not match stored request key" msgstr "提供的请求密钥与存储的请求密钥不匹配" msgid "provided verifier does not match stored verifier" msgstr "提供的验证器与存储的验证器不匹配" msgid "remaining_uses must be a positive integer or null." 
msgstr "remaining_uses 必须为正整数或 Null。" msgid "remaining_uses must not be set if redelegation is allowed" msgstr "如果允许重新委派,那么不能设置 remaining_uses" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "请求更新组 %(group)s,但所提供配置反而包含组 %(group_other)s" msgid "rescope a scoped token" msgstr "请重新确定带范围的令牌的范围" #, python-format msgid "role %s is not defined" msgstr "未定义角色 %s" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "如果还指定了 include_subtree,那么必须指定 scope.project.id" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s 未找到或者不是一个目录" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s 未找到或者不是一个文件" #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "令牌引用必须为 KeystoneToken 类型,但收到:%s" msgid "" "update of domain_id is deprecated as of Mitaka and will be removed in O." msgstr "从 Mitaka 开始,已不推荐更新 domain_id,将在 O 发行版中移除。" #, python-format msgid "" "validated expected to find %(param_name)r in function signature for " "%(func_name)r." msgstr "已验证期望在 %(func_name)r 的函数特征符中查找 %(param_name)r。" keystone-9.0.0/keystone/locale/es/0000775000567000056710000000000012701407246020234 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/es/LC_MESSAGES/0000775000567000056710000000000012701407246022021 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000155112701407102026562 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" #, python-format msgid "Unable to open template file %s" msgstr "No se puede abrir el archivo de plantilla %s" keystone-9.0.0/keystone/locale/es/LC_MESSAGES/keystone.po0000664000567000056710000016043412701407105024224 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Alberto Molina Coballes , 2014 # dario hereñu , 2015 # Guillermo Vitas Gil , 2014 # Jose Enrique Ruiz Navarro , 2014 # Jose Ramirez Garcia , 2014 # Pablo Sanchez , 2015 # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Eugènia Torrella , 2016. #zanata # Tom Cocozzello , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev8\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-24 10:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 10:55+0000\n" "Last-Translator: Eugènia Torrella \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "%(driver)s is not supported driver version" msgstr "%(driver)s no es una versión de controlador no soportada" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "" "El nombre %(entity)s no puede contener los siguientes caracteres " "reservados: %(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s no es u suceso de notificación válido, debe ser uno de: %(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s no es un host de panel de control de confianza" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s no proporciona migración de base de datos. La vía de acceso de " "repositorio de migración en %(path)s no existe o no es un directorio." #, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s" msgstr "%(prior_role_id)s no implica %(implied_role_id)s" #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." msgstr "%(property_name)s no puede tener menos de %(min_length)s caracteres." 
#, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "%(property_name)s no es una %(display_expected_type)s" #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." msgstr "%(property_name)s no debe tener más de %(max_length)s caracteres." #, python-format msgid "%(role_id)s cannot be an implied roles" msgstr "%(role_id)s no puede ser un rol implicado" #, python-format msgid "%s cannot be empty." msgstr "%s no puede estar vacío." #, python-format msgid "%s extension does not exist." msgstr "La extensión %s no existe." #, python-format msgid "%s field is required and cannot be empty" msgstr "campo %s es necesario y no puede estar vacío" #, python-format msgid "%s field(s) cannot be empty" msgstr "%s campo(s) no puede estar vacío" #, python-format msgid "" "%s for the LDAP identity backend has been deprecated in the Mitaka release " "in favor of read-only identity LDAP access. It will be removed in the \"O\" " "release." msgstr "" "El programa de fondo de identidad LDAP %s se ha dejado en desuso en el " "release de Mitaka, sustituyéndolo por un acceso LDAP de identidad de solo " "lectura. Se eliminará en el release \"O\"." msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(Inhabilite la modalidad insecure_debug para suprimir estos detalles.)" msgid "--all option cannot be mixed with other options" msgstr "La opción --all no puede mezclarse con otras opciones" msgid "A project-scoped token is required to produce a service catalog." msgstr "" "Se necesita una señal con ámbito de proyecto para producir un catálogo de " "servicio." msgid "Access token is expired" msgstr "El token de acceso ha expirado" msgid "Access token not found" msgstr "No se ha encontrado el token de acceso" msgid "Additional authentications steps required." msgstr "Se precisan pasos adicionales de autenticación." 
msgid "An unexpected error occurred when retrieving domain configs" msgstr "" "Se ha producido un error inesperado al recuperar configuraciones de dominio" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "Un error inesperado ocurrió cuando se intentaba almacenar %s" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "" "El servidor no ha podido completar su petición debido a un error inesperado." #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "" "Un error inesperado a impedido que el servidor complete su solicitud: " "%(exception)s" msgid "An unhandled exception has occurred: Could not find metadata." msgstr "" "Se ha producido una excepción no manejada: no se han podido encontrar los " "metadatos." msgid "At least one option must be provided" msgstr "Debe especificar al menos una opción" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "Debe proporcionarse al menos una opción, utilice --all o --domain-name" msgid "At least one role should be specified." msgstr "Al menos debe especificarse un rol" #, python-format msgid "" "Attempted automatic driver selection for assignment based upon " "[identity]\\driver option failed since driver %s is not found. Set " "[assignment]/driver to a valid driver in keystone config." msgstr "" "Se ha intentado la seleción automática de controlador para la asignación en " "base a la opción [identity]\\driver, pero ha fallado porque no se encuentra " "el controlador %s. Defina [assignment]/driver con un controlador válido en " "la configuración de keystone." msgid "Attempted to authenticate with an unsupported method." msgstr "Se ha intentado autenticar con un método no compatible." 
msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "" "Intentando utilizar la señal OS-FEDERATION con el servicio de identidad V2, " "utilice la autenticación V3 ." msgid "Authentication plugin error." msgstr "Error en el complemento de autenticación " #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "" "El programa de fondo `%(backend)s` no es un programa de fondo almacenado en " "caché válido. Programas de fondo válidos: %(backend_list)s" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "" "No se puede autorizar una señal de solicitud con una señal emitida mediante " "delegación." #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "No se puede cambiar %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "No se puede cambiar el ID del Dominio" msgid "Cannot change user ID" msgstr "No se puede cambiar el ID de usuario" msgid "Cannot change user name" msgstr "No se puede cambiar el nombre de usuario" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "No se puede crear un punto final con el URL no válido: %(url)s" #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "No se puede crear el proyecto con padre: %(project_id)s" #, python-format msgid "" "Cannot create project, since it specifies its owner as domain %(domain_id)s, " "but specifies a parent in a different domain (%(parent_domain_id)s)." msgstr "" "No se puede crear el proyecto porque especifica su propietario como dominio " "%(domain_id)s, pero especifica un padre en otro dominio distinto " "(%(parent_domain_id)s)." #, python-format msgid "" "Cannot create project, since its parent (%(domain_id)s) is acting as a " "domain, but project's specified parent_id (%(parent_id)s) does not match " "this domain_id." 
msgstr "" "No se puede crear el proyecto porque su padre (%(domain_id)s) actúa como " "dominio, pero el parent_id especificado en el proyecto, (%(parent_id)s), no " "coincide con este domain_id." msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "No se puede suprimir un dominio que está habilitado, antes debe " "inhabilitarlo." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "No se puede suprimir el proyecto %(project_id)s porque su subárbol contiene " "proyectos habilitados." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "No se puede suprimir el proyecto %s porque no es una hoja en la jerarquía. " "Utilice la opción de casacada si desea suprimir un subárbol entero." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "No se puede inhabilitar el proyecto %(project_id)s porque su subárbol " "contiene proyectos habilitados." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "" "No se puede habilitar el proyecto %s, ya que tiene padres inhabilitados" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "No se pueden enumerar las asignaciones obtenidas de grupos y filtradas por " "ID de usuario." msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "No se pueden listar las señales de solicitud con una señal emitida mediante " "delegación." #, python-format msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s" msgstr "No se puede abrir el certificado %(cert_file)s. 
Razón: %(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "No se puede eliminar un rol que no se ha otorgado, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "No se puede truncar una llamada de controlador si lista de sugerencias como " "primer parámetro después de self " msgid "Cannot update domain_id of a project that has children." msgstr "No se puede actualizar el domain_id de un proyecto que tenga hijos." msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "No se pueden utilizar los parámetros de consulta parents_as_list y " "parents_as_ids al mismo tiempo." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "No se pueden utilizar los parámetros de consulta subtree_as_list y " "subtree_as_ids al mismo tiempo." msgid "Cascade update is only allowed for enabled attribute." msgstr "" "Solo se permite la actualización en casacada de los atributos habilitados." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "La combinación de filtro de grupo y efectivo dará siempre como resultado una " "lista vacía." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "La combinación de filtros heredados, de dominio y efectivos dará siempre " "como resultado una lista vacía." 
#, python-format msgid "Config API entity at /domains/%s/config" msgstr "Entidad de API de config en /domains/%s/config" #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "Ha ocurrido un conflicto al intentar almacenar %(type)s - %(details)s" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "Se han especificado ID de región conflictivos: \"%(url_id)s\" != \"%(ref_id)s" "\"" msgid "Consumer not found" msgstr "No se ha encontrado el consumidor" #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target %(target)s" msgstr "" "No se pueden cambiar atributos inalterables '%(attributes)s' en el destino " "%(target)s" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "No se ha podido determinar el ID del proveedor de identidades. La opción de " "configuración %(issuer_attribute)s no se ha encontrado en el entorno de la " "solicitud." 
#, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "" "No se ha podido encontrar %(group_or_option)s en la configuración de dominio " "para el dominio %(domain_id)s" #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "No se ha encontrado un grupo de puntos finales: %(endpoint_group_id)s" msgid "Could not find Identity Provider identifier in environment" msgstr "" "No se ha podido encontrar el identificador del proveedor de identidad en el " "entorno" #, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "No se ha podido encontrar el proveedor de identidad: %(idp_id)s" #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "No se ha podido encontrar el proveedor de servicios: %(sp_id)s" #, python-format msgid "Could not find credential: %(credential_id)s" msgstr "No se ha podido encontrar la credencial: %(credential_id)s" #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "No se ha podido encontrar el dominio: %(domain_id)s" #, python-format msgid "Could not find endpoint: %(endpoint_id)s" msgstr "No se ha podido encontrar : %(endpoint_id)s" #, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "" "No se ha podido encontrar el protocolo federado %(protocol_id)s para el " "proveedor de identidad: %(idp_id)s" #, python-format msgid "Could not find group: %(group_id)s" msgstr "No se ha podido encontrar el grupo: %(group_id)s" #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "No se ha podido encontrar la correlación: %(mapping_id)s" msgid "Could not find policy association" msgstr "No se ha encontrado una asociación de política" #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "No se ha podido encontrar : %(policy_id)s" #, python-format msgid "Could not find project: %(project_id)s" msgstr "No se ha 
podido encontrar el proyecto: %(project_id)s" #, python-format msgid "Could not find region: %(region_id)s" msgstr "No se ha podido encontrar la región: %(region_id)s" #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" "No se ha podido encontrar la asignación de roles con el rol: %(role_id)s, " "usuario o grupo: %(actor_id)s, proyecto o dominio: %(target_id)s" #, python-format msgid "Could not find role: %(role_id)s" msgstr "No se ha podido encontrar el rol: %(role_id)s" #, python-format msgid "Could not find service: %(service_id)s" msgstr "No se ha podido encontrar el servicio: %(service_id)s" #, python-format msgid "Could not find token: %(token_id)s" msgstr "No se ha podido encontrar la señal: %(token_id)s" #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "No se ha podido encontrar la confianza: %(trust_id)s" #, python-format msgid "Could not find user: %(user_id)s" msgstr "No se ha podido encontrar el usuario: %(user_id)s" #, python-format msgid "Could not find version: %(version)s" msgstr "No se ha podido encontrar la versión: %(version)s" #, python-format msgid "Could not find: %(target)s" msgstr "No se ha podido encontrar: %(target)s" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "No se ha podido correlacionar ninguna propiedad de usuario federado a valor " "de identidad. Compruebe los registros de depuración o la correlación " "utilizada para obtener información más detallada." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "No se ha podido correlacionar el usuario al establecer la identidad de " "usuario efímera. 
Las reglas de correlación deben especificar ID/nombre de " "usuario o se debe establecer la variable de entorno REMOTE_USER." msgid "Could not validate the access token" msgstr "No se ha podido validar la señal de acceso" msgid "Credential belongs to another user" msgstr "La credencial pertenece a otro usuario" msgid "Credential signature mismatch" msgstr "Discrepancia en la firma de credencial" #, python-format msgid "" "Direct import of auth plugin %(name)r is deprecated as of Liberty in favor " "of its entrypoint from %(namespace)r and may be removed in N." msgstr "" "La importación directa del plug-in de autorización %(name)r está en desuso a " "partir de Liberty, sustituyéndose por su punto de entrada desde " "%(namespace)r y puede que se elimine en N." #, python-format msgid "" "Direct import of driver %(name)r is deprecated as of Liberty in favor of its " "entrypoint from %(namespace)r and may be removed in N." msgstr "" "La importación directa del controlador %(name)r está en desuso a partir de " "Liberty, sustituyéndose por su punto de entrada desde %(namespace)r y puede " "que se elimine en N." msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "Inhabilitando una entidad donde el atributo 'enable' se omite en la " "configuración." #, python-format msgid "Domain (%s)" msgstr "Dominio (%s)" #, python-format msgid "Domain cannot be named %s" msgstr "El dominio no se puede llamar %s" #, python-format msgid "Domain cannot have ID %s" msgstr "El dominio no puede tener el ID %s" #, python-format msgid "Domain is disabled: %s" msgstr "El dominio está inhabilitado: %s" msgid "Domain name cannot contain reserved characters." msgstr "El nombre de dominio no puede contener caracteres reservados." 
msgid "Domain scoped token is not supported" msgstr "La señal con ámbito de dominio no está soportada" msgid "Domain specific roles are not supported in the V8 role driver" msgstr "El controlador de roles V8 no admite roles específicos de dominio." #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "Dominio: %(domain)s ya tiene definida una configuración - ignorando el " "archivo: %(file)s." msgid "Duplicate Entry" msgstr "Entrada Duplicada " #, python-format msgid "Duplicate ID, %s." msgstr "ID duplicado, %s." #, python-format msgid "Duplicate entry: %s" msgstr "Entrada duplicada: %s" #, python-format msgid "Duplicate name, %s." msgstr "Nombre duplicado, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "ID remoto duplicado: %s" msgid "EC2 access key not found." msgstr "No se ha encontrado la clave de acceso de EC2." msgid "EC2 signature not supplied." msgstr "No se ha proporcionado la firma de EC2." msgid "" "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set." msgstr "" "Se debe definir el argumento bootstrap-password o bien OS_BOOTSTRAP_PASSWORD." msgid "Enabled field must be a boolean" msgstr "El campo habilitado debe ser un booleano" msgid "Enabled field should be a boolean" msgstr "El campo habilitado debe ser un booleano" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "" "No se ha encontrado el punto final %(endpoint_id)s en el proyecto " "%(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "" "No se ha encontrado la asociación del proyecto del grupo de puntos finales" msgid "Ensure configuration option idp_entity_id is set." msgstr "" "Compruebe que se haya establecido la opción de configuración idp_entity_id." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "" "Compruebe que se haya establecido la opción de configuración " "idp_sso_endpoint." 
#, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Error al analizar el archivo de configuración para el dominio: %(domain)s, " "archivo: %(file)s." #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Error al abrir el archivo %(path)s: %(err)s" #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "Error al analizar la línea: '%(line)s': %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Error al analizar las reglas %(path)s: %(err)s" #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "Error al leer el archivo de metadatos, %(reason)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Se ha superado el número máximo de intentos de registrar un dominio " "%(domain)s para utilizar el controlador SQL, el último dominio que parece " "haberlo tenido es %(last_domain)s, abandonando" #, python-format msgid "Expected dict or list: %s" msgstr "Se espera un diccionario o una lista: %s" msgid "" "Expected signing certificates are not available on the server. Please check " "Keystone configuration." msgstr "" "Los certificados para firmas esperados no están disponibles en el servidor. " "Compruebe la configuración de Keystone." #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "Esperando encontrar %(attribute)s en %(target)s - el servidor no pudo " "cumplir la solicitud porque está formada incorrectamente o de otra forma es " "incorrecta. El cliente se asume en error." 
#, python-format msgid "Failed to start the %(name)s server" msgstr "No se ha podido iniciar el servidor %(name)s" msgid "Failed to validate token" msgstr "Ha fallado la validación del token" msgid "Federation token is expired" msgstr "La señal de federación ha caducado" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "El campo \"remaining_uses\" está establecido en %(value)s, pero no debe " "estar establecido para poder redelegar una confianza" msgid "Found invalid token: scoped to both project and domain." msgstr "" "Se ha encontrado una señal no válida: tiene un ámbito de proyecto y dominio." #, python-format msgid "Group %s not found in config" msgstr "No se ha encontrado el grupo %s en la configuración" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "El grupo %(group)s no se admite para las configuraciones específicas de " "dominio" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "El grupo %(group_id)s devuelto por la correlación %(mapping_id)s no se ha " "encontrado en el programa de fondo." #, python-format msgid "" "Group membership across backend boundaries is not allowed, group in question " "is %(group_id)s, user is %(user_id)s" msgstr "" "La pertenencia a grupos en los límites del programa de fondo no está " "permitida, el grupo en cuestión es %(group_id)s, el usuario es %(user_id)s" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "" "No se ha encontrado el ID de atributo %(id_attr)s en el objeto LDAP %(dn)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "El proveedor de identidad %(idp)s está inhabilitado" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." 
msgstr "" "No se ha incluido el identificador del proveedor de identidad de entrada " "entre los identificadores aceptados." msgid "Invalid EC2 signature." msgstr "Firma de EC2 no válida." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "Opción de LDAP TLS no válida: %(option)s. Elegir uno de: %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Opción LDAP TLS_AVAIL inválida: %s. TLS no disponible" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "Opción deref LDAP no válida: %(option)s. Elija una de: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "" "Ámbito LDAP incorrecto: %(scope)s. Seleccione una de las siguientes " "opciones: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Combinación TLS/LDAPS no válida" #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "" "Tipo de datos de información de auditoría no válido: %(data)s (%(type)s)" msgid "Invalid blob in credential" msgstr "Blob no válido en credencial" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Nombre de dominio no válido: %(domain)s encontrado en el nombre de archivo " "de configuración: %(file)s - ignorando este archivo." #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "Configuración específica de dominio no válida: %(reason)s" #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "Entrada no válida para el campo '%(path)s'. El valor es '%(value)s'." msgid "Invalid limit value" msgstr "Valor de límite no válido" #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service or " "Region+Service allowed. 
Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s" msgstr "" "Combinación no válida de entidades para la asociación de políticas: solo se " "permite Punto final, Servicio o Región + Servicio. La solicitud fue: Punto " "final: %(endpoint_id)s, Servicio: %(service_id)s, Región: %(region_id)s" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Regla no válida: %(identity_value)s. Se deben especificar las palabras clave " "'groups' y 'domain'." msgid "Invalid signature" msgstr "Firma no válida" msgid "Invalid user / password" msgstr "Usuario / contraseña no válidos" msgid "Invalid username or TOTP passcode" msgstr "Nombre de usuario o código de acceso TOTP no válido" msgid "Invalid username or password" msgstr "Usuario o contraseña no válidos" #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." msgstr "La región KVS %s ya se ha configurado. No se puede reconfigurar." #, python-format msgid "Key Value Store not configured: %s" msgstr "Almacén de valor de clave no configurado: %s" #, python-format msgid "LDAP %s create" msgstr "Creación de LDAP %s" #, python-format msgid "LDAP %s delete" msgstr "Supresión de LDAP %s" #, python-format msgid "LDAP %s update" msgstr "Actualización de LDAP %s" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "Longitud del ID de recurso transformable > 64, que es el número máximo de " "caracteres permitidos" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "La sección local de la correlación %(mapping_id)s hace referencia a una " "coincidencia remota que no existe (p.e. {0} en una sección local)." 
#, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "Se ha producido tiempo de espera de bloqueo para la clave, %(target)s" #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "" "La clave de bloqueo debe coincidir con la clave de destino: %(lock)s != " "%(target)s" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "URL de punto final formado incorrectamente (%(endpoint)s), vea el registro " "de ERROR para obtener detalles." msgid "Marker could not be found" msgstr "No se ha podido encontrar el marcador" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "Se ha alcanzado la profundidad máxima de jerarquía en la rama %s." #, python-format msgid "Maximum lock attempts on %s occurred." msgstr "Se han producido los intentos de bloqueo máximos en %s." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "El miembro %(member)s ya es miembro del grupo %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Método no invocable: %s" msgid "Missing entity ID from environment" msgstr "Falta el ID de entidad del entorno" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "La modificación de \"redelegation_count\" tras la redelegación está " "prohibida. Se recomienda omitir este parámetro." msgid "Multiple domains are not supported" msgstr "No se admiten varios dominios" msgid "Must be called within an active lock context." msgstr "Se debe llamar dentro de un contexto de bloqueo activo." msgid "Must specify either domain or project" msgstr "Debe especificar dominio o proyecto" msgid "Name field is required and cannot be empty" msgstr "El nombre de campo es necesario y no puede estar vacío" msgid "Neither Project Domain ID nor Project Domain Name was provided." 
msgstr "" "No se ha proporcionado el ID de dominio de proyecto ni el nombre de dominio " "de proyecto." msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, if " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "No se han encontrado cabeceras de autorización, no se puede continuar con " "las llamadas relacionadas OAuth, si se están ejecutando bajo HTTPd o Apache, " "asegúrese de que WSGIPassAuthorization se establece en activada." msgid "No authenticated user" msgstr "Ningún usuario autenticado" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "No se han encontrado claves de cifrado; ejecute keystone-manage fernet_setup " "para arrancar una." msgid "No options specified" msgstr "No se especificaron opciones" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "No hay ninguna política asociada con el punto final %(endpoint_id)s." 
#, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "No quedan usos para la confianza: %(trust_id)s" msgid "No token in the request" msgstr "No hay ningún token en la solicitud" msgid "Non-default domain is not supported" msgstr "El dominio no predeterminado no está soportado" msgid "One of the trust agents is disabled or deleted" msgstr "Uno de los agentes de confianza está inhabilitado o se ha suprimido" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "Se ha encontrado la opción %(option)s sin grupo especificado al comprobar la " "solicitud de configuración del dominio" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "La opción %(option)s del grupo %(group)s no se admite para las " "configuraciones específicas del dominio" #, python-format msgid "Project (%s)" msgstr "Proyecto (%s)" #, python-format msgid "Project ID not found: %(t_id)s" msgstr "ID de proyecto no encontrado: %(t_id)s" msgid "Project field is required and cannot be empty." msgstr "El campo de proyecto es obligatorio y no puede estar vacío." #, python-format msgid "Project is disabled: %s" msgstr "El proyecto está inhabilitado: %s" msgid "Project name cannot contain reserved characters." msgstr "El nombre de proyecto no puede contener caracteres reservados." 
msgid "Query string is not UTF-8 encoded" msgstr "La serie de consulta no tiene codificación UTF-8" #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "No se da soporte para leer el valor predeterminado para la opción %(option)s " "del grupo %(group)s" msgid "Redelegation allowed for delegated by trust only" msgstr "Sólo se permite volver a delegar un delegado por confianza" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "La profundidad de redelegación restante de %(redelegation_depth)d está fuera " "del rango permitido de [0..%(max_count)d]" msgid "" "Remove admin_crud_extension from the paste pipeline, the admin_crud " "extension is now always available. Updatethe [pipeline:admin_api] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "Elimine admin_crud_extension de la interconexión de pegar, la extensión " "admin_crud ahora está siempre disponible. Actualice la sección [pipeline:" "admin_api] en keystone-paste.ini consecuentemente, ya que se eliminará en el " "release O." msgid "" "Remove endpoint_filter_extension from the paste pipeline, the endpoint " "filter extension is now always available. Update the [pipeline:api_v3] " "section in keystone-paste.ini accordingly as it will be removed in the O " "release." msgstr "" "Elimine endpoint_filter_extension de la interconexión de pegar, la extensión " "de filtro de punto final ahora está siempre disponible. Actualice la " "sección [pipeline:api_v3] en keystone-paste.ini consecuentemente, ya que se " "eliminará en el release O." msgid "" "Remove federation_extension from the paste pipeline, the federation " "extension is now always available. Update the [pipeline:api_v3] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." 
msgstr "" "Elimine federation_extension de la interconexión de pegar, la extensión de " "federación ahora está siempre disponible. Actualice la sección [pipeline:" "api_v3] en keystone-paste.ini consecuentemente, ya que se eliminará en el " "release O." msgid "" "Remove oauth1_extension from the paste pipeline, the oauth1 extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Elimine oauth1_extension de la interconexión de pegar, la extensión oauth1 " "ahora está siempre disponible. Actualice la sección [pipeline:api_v3] en " "keystone-paste.ini consecuentemente, ya que se eliminará en el release O." msgid "" "Remove revoke_extension from the paste pipeline, the revoke extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Elimine revoke_extension de la interconexión de pegar, la extensión de " "revocación ahora está siempre disponible. Actualice la sección [pipeline:" "api_v3] en keystone-paste.ini consecuentemente, ya que se eliminará en el " "release O." msgid "" "Remove simple_cert from the paste pipeline, the PKI and PKIz token providers " "are now deprecated and simple_cert was only used insupport of these token " "providers. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Elimine simple_cert de la interconexión de pegar, los proveedores de token " "PKI y PKIz están ahora en desuso y simple_cert se utilizaba únicamente para " "dar soporte a estos proveedores de token. Actualice la sección [pipeline:" "api_v3] en keystone-paste.ini consecuentemente, ya que se eliminará en el " "release O." msgid "" "Remove user_crud_extension from the paste pipeline, the user_crud extension " "is now always available. 
Updatethe [pipeline:public_api] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." msgstr "" "Elimine user_crud_extension de la interconexión de pegar, la extensión " "user_crud ahora está siempre disponible. Actualice la sección [pipeline:" "public_api] en keystone-paste.ini consecuentemente, ya que se eliminará en " "el release O." msgid "Request Token does not have an authorizing user id" msgstr "El token de solicitud no tiene un id de usuario de autorización" #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. The " "server could not comply with the request because the attribute size is " "invalid (too large). The client is assumed to be in error." msgstr "" "El atributo de solicitud %(attribute)s debe ser menor que o igual a " "%(size)i. El servidor no pudo cumplir con la solicitud debido al tamaño del " "atributo no es válido (demasiado grande). El cliente se asume en error." msgid "Request must have an origin query parameter" msgstr "La solicitud debe tener un parámetro de consulta de origen" msgid "Request token is expired" msgstr "El token solicitado ha expirado" msgid "Request token not found" msgstr "No se ha encontrado el token solicitado" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "El tiempo de caducidad solicitado es mayor que el que puede proporcionar la " "confianza redelegada" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "La profundidad de redelegación solicitada de %(requested_count)d es mayor " "que la permitida %(max_count)d" msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of running " "in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will " "be removed in the \"M\"-Release." 
msgstr "" "La ejecución de keystone a través de eventlet está en desuso a partir de " "Kilo sustituyéndose por la ejecución en un servidor WSGI (por ejemplo, " "mod_wsgi). El soporte para keystone bajo eventlet se eliminará en \"M\"-" "Release." msgid "Scoping to both domain and project is not allowed" msgstr "El ámbito para dominio y proyecto no está permitido" msgid "Scoping to both domain and trust is not allowed" msgstr "El ámbito para dominio y confianza no está permitido" msgid "Scoping to both project and trust is not allowed" msgstr "El ámbito para proyecto y confianza no está permitido" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "El proveedor de servicios %(sp)s está inhabilitado" msgid "Some of requested roles are not in redelegated trust" msgstr "Algunos roles solicitados no están en la confianza redelegada" msgid "Specify a domain or project, not both" msgstr "Especifique un dominio o proyecto, no ambos" msgid "Specify a user or group, not both" msgstr "Especifique un usuario o grupo, no ambos" msgid "Specify one of domain or project" msgstr "Especifique un dominio o proyecto" msgid "Specify one of user or group" msgstr "Especifique un usuario o grupo" #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "La longitud de la serie se ha excedido. La longitud de la serie '%(string)s' " "ha excedido el límite de la columna %(type)s(CHAR(%(length)d))." msgid "Tenant name cannot contain reserved characters." msgstr "El nombre de inquilino no puede contener caracteres reservados." #, python-format msgid "" "The %s extension has been moved into keystone core and as such its " "migrations are maintained by the main keystone database control. 
Use the " "command: keystone-manage db_sync" msgstr "" "La extensión %s se ha trasladado al núcleo de keystone y, como tal, el " "mantenimiento de sus migraciones se hace desde el control de bases de datos " "principal de keystone. Utilice el comando: keystone-manage db_sync" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "'expires_at' no debe ser antes que ahora. El servidor podría no cumplir la " "solicitud porque tiene un formato incorrecto o es incorrecta de alguna otra " "forma. Se supone que el cliente es erróneo." msgid "The --all option cannot be used with the --domain-name option" msgstr "La opción --all no se puede utilizar con la opción --domain-name" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "" "El archivo de configuración de Keystone %(config_file)s no se ha podido " "encontrar." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "La configuración específica del dominio Keystone ha especificado más de un " "controlador SQL (sólo se permite uno): %(source)s." msgid "The action you have requested has not been implemented." msgstr "La acción que ha solicitado no ha sido implementada." msgid "The authenticated user should match the trustor." msgstr "El usuario autenticado debe coincidir con el fideicomitente." msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." msgstr "" "Los certificados que ha solicitado no están disponibles. Es probable que " "este servidor no utilice señales PKI, de lo contrario este es el resultado " "de una configuración incorrecta." 
msgid "The configured token provider does not support bind authentication." msgstr "" "El proveedor de señales configurado no da soporte a la autenticación de " "enlaces." msgid "The creation of projects acting as domains is not allowed in v2." msgstr "En la v2, no se permite crear proyectos que actúen como dominios." #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "La longitud de la contraseña debe ser menor o igual que %(size)i. El " "servidor no pudo cumplir la solicitud porque la contraseña no es válida." msgid "The request you have made requires authentication." msgstr "La solicitud que ha hecho requiere autenticación." msgid "The resource could not be found." msgstr "El recurso no se ha podido encontrar." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "La llamada de revocación no debe tener a la vez domain_id y project_id. Esto " "es un error del servidor de Keystone. La solicitud actual ha terminado " "anormalmente." msgid "The service you have requested is no longer available on this server." msgstr "El servicio que ha solicitado ya no está disponible en este servidor." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "La región padre %(parent_region_id)s especificada crearía una jerarquía de " "regiones circular." 
#, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "El valor de grupo %(group)s especificado en la configuración debe ser un " "diccionario de opciones" msgid "There should not be any non-oauth parameters" msgstr "Solo puede haber parámetros de oauth" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Esta no es una versión de carga útil Fernet reconocida: %s" #, python-format msgid "This is not a recognized Fernet token %s" msgstr "Este no es un token Fernet reconocido %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "La indicación de fecha y hora no está en el formato esperado. El servidor no " "ha podido satisfacer la solicitud porque tiene un formato incorrecto o es " "incorrecta de alguna otra forma. Se supone que el cliente es erróneo." #, python-format msgid "" "To get a more detailed information on this error, re-run this command for " "the specific domain, i.e.: keystone-manage domain_config_upload --domain-" "name %s" msgstr "" "Para obtener información más detallada sobre este error, vuelva a ejecutar " "este mandato para el dominio especificado, por ejemplo: keystone-manage " "domain_config_upload --domain-name %s" msgid "Token belongs to another user" msgstr "El token pertenece a otro usuario" msgid "Token does not belong to specified tenant." msgstr "La señal no pertenece al arrendatario especificado." msgid "Token version is unrecognizable or unsupported." msgstr "Versión de la señal no reconocida o no soportada." msgid "Trustee has no delegated roles." msgstr "La entidad de confianza no tiene roles delegados." msgid "Trustor is disabled." msgstr "Trustor está deshabilitado." 
#, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Intentando actualizar el grupo %(group)s, para que ese, y sólo ese grupo se " "especifique en la configuración" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Intentando actualizar la opción %(option)s en el grupo %(group)s, pero la " "configuración proporcionada contiene la opción %(option_other)s en su lugar" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Intentando actualizar la opción %(option)s en el grupo %(group)s, para que " "esa, y solo esa opción, se especifique en la configuración" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "No se puede acceder a la base de datos de keystone, compruebe si está " "configurada correctamente." #, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." msgstr "" "No se puede consumir la confianza %(trust_id)s, no se puede adquirir el " "bloqueo." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "No se puede suprimir la región %(region_id)s porque esta o sus regiones " "secundarias tienen puntos finales asociados." 
msgid "Unable to downgrade schema" msgstr "No se ha podido degradar el esquema" #, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "" "No se pueden encontrar grupos válidos mientras se utiliza la correlación " "%(mapping_id)s" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "No se ha podido localizar el directorio config de dominio: %s" #, python-format msgid "Unable to lookup user %s" msgstr "No se ha podido buscar el usuario %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "No se puede reconciliar el atributo de identidad %(attribute)s porque tiene " "los valores en conflicto %(new)s y %(old)s" #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed, or this is the result of misconfiguration. Reason " "%(reason)s" msgstr "" "No se puede firmar la aserción SAML. Es probable que este servidor no tenga " "xmlsec1 instalado o que sea el resultado de una configuración incorrecta. " "Razón %(reason)s" msgid "Unable to sign token." msgstr "No se ha podido firmar la señal." 
#, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Se ha encontrado un tipo de asignación inesperado, %s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" "Combinación no esperada de atributos de otorgamiento - Usuario: %(user_id)s, " "Grupo: %(group_id)s, Proyecto: %(project_id)s, Dominio: %(domain_id)s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Estado inesperado solicitado para la respuesta de JSON Home, %s" msgid "Unknown Target" msgstr "Destino desconocido" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "Dominio desconocido '%(name)s' especificado por --domain-name" #, python-format msgid "Unknown token version %s" msgstr "Versión de señal desconocida %s" #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "Dependencia no registrada: %(name)s para %(targets)s" msgid "Update of `domain_id` is not allowed." msgstr "No se permite la actualización de `domain_id`." msgid "Update of `is_domain` is not allowed." msgstr "No se permite la actualización de `is_domain`." msgid "Update of `parent_id` is not allowed." msgstr "No se permite la actualización de `parent_id`." msgid "Update of domain_id is only allowed for root projects." msgstr "Solo se permite actualizar el domain_id de los proyectos raíz." msgid "Update of domain_id of projects acting as domains is not allowed." msgstr "" "No se permite actualizar el domain_id de los proyectos que actúen como " "dominios." 
msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "" "Utilice un token de ámbito de proyecto cuando intente crear una aserción SAML" msgid "" "Use of the identity driver config to automatically configure the same " "assignment driver has been deprecated, in the \"O\" release, the assignment " "driver will need to be expicitly configured if different than the default " "(SQL)." msgstr "" "El uso de la configuración del controlador de identidad para configurar " "automáticamente el mismo controlador de asignación está en desuso. En el " "release \"O\", el controlador de asignación se deberá configurar " "explícitamente si es distinto que el valor predeterminado (SQL)." #, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "El usuario %(u_id)s no está autorizado en el proyecto %(t_id)s" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "El usuario %(user_id)s no tiene acceso al Dominio %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "El usuario %(user_id)s no tiene acceso al proyecto %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "El usuario %(user_id)s ya es miembro del grupo %(group_id)s" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "Usuario '%(user_id)s' no encontrado en el grupo '%(group_id)s'" msgid "User IDs do not match" msgstr "ID de usuario no coinciden" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "No se puede crear la autorización de usuario porque falta el ID de usuario o " "el nombre de usuario con el ID de dominio, o el nombre de usuario con el " "nombre de dominio." 
#, python-format msgid "User is disabled: %s" msgstr "El usuario está inhabilitado: %s" msgid "User is not a member of the requested project" msgstr "El usuario no es miembro del proyecto solicitado" msgid "User is not a trustee." msgstr "El usuario no es de confianza." msgid "User not found" msgstr "Usuario no encontrado" msgid "User not valid for tenant." msgstr "Usuario no válido para este inquilino." msgid "User roles not supported: tenant_id required" msgstr "Roles de usuario no admitidos: tenant_id obligatorio" #, python-format msgid "User type %s not supported" msgstr "El tipo de usuario %s no está soportado" msgid "You are not authorized to perform the requested action." msgstr "No está autorizado para realizar la acción solicitada." #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "No está autorizado para realizar la acción solicitada: %(action)s" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Ha intentado crear un recurso utilizando el token de administración. Dado " "que este token no se encuentra dentro de un dominio, debe incluir " "explícitamente un dominio al que pertenecerá este recurso." msgid "`key_mangler` functions must be callable." msgstr "Las funciones `key_mangler` se deben poder llamar." 
msgid "`key_mangler` option must be a function reference" msgstr "La opción `key_mangler` debe ser una referencia de función" msgid "any options" msgstr "cualquier opción" msgid "auth_type is not Negotiate" msgstr "auth_type no es Negotiate" msgid "authorizing user does not have role required" msgstr "el usuario de autorización no tiene la función requerida" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "No se puede crear un proyecto en una rama que contiene un proyecto " "inhabilitado: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "no se puede suprimir un proyecto habilitado que actúe como dominio. " "Inhabilite el proyecto %s." #, python-format msgid "group %(group)s" msgstr "grupo %(group)s" msgid "" "idp_contact_type must be one of: [technical, other, support, administrative " "or billing." msgstr "" "idp_contact_type debe ser una de estas opciones: [técnico, otros, soporte, " "administrativo o facturación." #, python-format msgid "invalid date format %s" msgstr "formato de fecha no válido %s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "no se permite tener dos proyectos actuando como dominios con el mismo " "nombre: %s" #, python-format msgid "" "it is not permitted to have two projects within a domain with the same " "name : %s" msgstr "" "no se permite tener dos proyectos dentro de un dominio con el mismo nombre: " "%s" msgid "only root projects are allowed to act as domains." msgstr "Sólo los proyectos raíz pueden actuar como dominios." 
#, python-format msgid "option %(option)s in group %(group)s" msgstr "opción %(option)s en el grupo %(group)s" msgid "provided consumer key does not match stored consumer key" msgstr "" "la clave de consumidor proporcionada no coincide con la clave de consumidor " "almacenada" msgid "provided request key does not match stored request key" msgstr "" "la clave de solicitud proporcionada no coincide con la clave de solicitud " "almacenada" msgid "provided verifier does not match stored verifier" msgstr "el verificador proporcionado no coincide con el verificador almacenado" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses debe ser un entero positivo o nulo." msgid "remaining_uses must not be set if redelegation is allowed" msgstr "remaining_uses no se debe establecer si se permite la redelegación" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "solicitud para actualizar el grupo %(group)s, pero la configuración " "proporcionada contiene el grupo %(group_other)s en su lugar" msgid "rescope a scoped token" msgstr "Volver a establecer el ámbito de una señal con ámbito" #, python-format msgid "role %s is not defined" msgstr "el rol %s no está definido" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "Se debe especificar scope.project.id si se especifica también include_subtree" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "No se ha encontrado o no es un directorio tls_cacertdir %s" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "No se ha encontrado o no es un fichero tls_cacertfile %s" #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "" "la referencia de señal debe ser un tipo KeystoneToken, se ha obtenido: %s" msgid "" "update of domain_id is deprecated as of Mitaka and will be removed in O." 
msgstr "" "La actualización de domain_id está en desuso en Mitaka y se eliminará en O." #, python-format msgid "" "validated expected to find %(param_name)r in function signature for " "%(func_name)r." msgstr "" "En la validación se esperaba encontrar %(param_name)r en la firma de función " "para %(func_name)r." keystone-9.0.0/keystone/locale/ja/0000775000567000056710000000000012701407246020217 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/ja/LC_MESSAGES/0000775000567000056710000000000012701407246022004 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000154712701407102026552 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Akihiro Motoki , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Japanese\n" #, python-format msgid "Unable to open template file %s" msgstr "テンプレートファイル %s を開けません" keystone-9.0.0/keystone/locale/ja/LC_MESSAGES/keystone.po0000664000567000056710000017632412701407105024214 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Tomoyuki KATO , 2012-2013 # Akihiro Motoki , 2015. #zanata # 笹原 昌美 , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev14\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-29 11:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-30 04:58+0000\n" "Last-Translator: 笹原 昌美 \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Japanese\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "%(driver)s is not supported driver version" msgstr "%(driver)s はサポートされるドライバーバージョンではありません" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "%(entity)s 名に以下の予約済み文字を含めることはできません: %(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s は有効な通知イベントではありません。%(actions)s のいずれかでなけれ" "ばなりません。" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s は信頼されたダッシュボードホストではありません" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s はデータベースマイグレーションを提供していません。%(path)s のマ" "イグレーションリポジトリーのパスが存在しないか、ディレクトリーではないかのい" "ずれかです。" #, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s" msgstr "%(prior_role_id)s は %(implied_role_id)s を暗黙的に示しません" #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." msgstr "%(property_name)s は %(min_length)s 文字より短くできません。" #, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "%(property_name)s が %(display_expected_type)s ではありません。" #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." 
msgstr "%(property_name)s は %(max_length)s 文字より長くできません。" #, python-format msgid "%(role_id)s cannot be an implied roles" msgstr "%(role_id)s は暗黙的ロールにできません" #, python-format msgid "%s cannot be empty." msgstr "%s は空にはできません。" #, python-format msgid "%s extension does not exist." msgstr "%s 拡張が存在しません。" #, python-format msgid "%s field is required and cannot be empty" msgstr "フィールド %s は必須フィールドであるため、空にできません" #, python-format msgid "%s field(s) cannot be empty" msgstr "フィールド %s を空にすることはできません" #, python-format msgid "" "%s for the LDAP identity backend has been deprecated in the Mitaka release " "in favor of read-only identity LDAP access. It will be removed in the \"O\" " "release." msgstr "" "LDAP ID バックエンドの %s は Mitaka リリースにおいて読み取り専用の ID LDAP ア" "クセスを選択したため、提供を終了しています。これは \"O\" リリースで削除される" "予定です。" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(これらの詳細を抑制するには、insecure_debug モードを無効にします。)" msgid "--all option cannot be mixed with other options" msgstr "--all オプションを他のオプションと組み合わせて使用することはできません" msgid "A project-scoped token is required to produce a service catalog." msgstr "" "サービスカタログを生成するには、プロジェクトにスコープが設定されたトークンが" "必要です。" msgid "Access token is expired" msgstr "アクセストークンの有効期限が切れています" msgid "Access token not found" msgstr "アクセストークンが見つかりません" msgid "Additional authentications steps required." msgstr "追加認証手順が必要です。" msgid "An unexpected error occurred when retrieving domain configs" msgstr "ドメイン設定の取得中に予期しないエラーが発生しました" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "%s の保存中に予期しないエラーが発生しました" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "予期しないエラーが発生したため、サーバーが要求を完了できませんでした。" #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "" "予期しないエラーが発生したため、サーバーが要求を完了できませんでした: " "%(exception)s" msgid "An unhandled exception has occurred: Could not find metadata." 
msgstr "処理できない例外が発生しました。メタデータが見つかりませんでした。" msgid "At least one option must be provided" msgstr "少なくとも 1 つはオプションを指定する必要があります" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "少なくとも 1 つのオプションを指定する必要があります。--all または --domain-" "name を使用してください" msgid "At least one role should be specified." msgstr "少なくとも 1 つのロールを指定する必要があります。" #, python-format msgid "" "Attempted automatic driver selection for assignment based upon " "[identity]\\driver option failed since driver %s is not found. Set " "[assignment]/driver to a valid driver in keystone config." msgstr "" "[identity]\\driver オプションに基づく割り当て用にドライバーの自動選択を試みま" "したが、ドライバー %s が見つからなかったため失敗しました。[assignment]/" "driver を Keystone 設定の有効なドライバーに設定してください。" msgid "Attempted to authenticate with an unsupported method." msgstr "サポートされていないメソッドを使用して認証を行おうとしました。" msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "" "V2 Identity Service で OS-FEDERATION トークンを使用しようとしています。V3 認" "証を使用してください" msgid "Authentication plugin error." msgstr "認証プラグインエラー。" #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "" "バックエンド `%(backend)s` は有効な memcached バックエンドではありません。有" "効なバックエンド: %(backend_list)s" msgid "Cannot authorize a request token with a token issued via delegation." 
msgstr "" "委任によって発行されたトークンを使用して要求トークンを許可することはできませ" "ん。" #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "%(option_name)s %(attr)s を変更できません" msgid "Cannot change Domain ID" msgstr "ドメイン ID を変更できません" msgid "Cannot change user ID" msgstr "ユーザー ID を変更できません" msgid "Cannot change user name" msgstr "ユーザー名を変更できません" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "以下の無効な URL を持つエンドポイントを作成できません: %(url)s" #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "親を持つプロジェクト: %(project_id)s を作成できません" #, python-format msgid "" "Cannot create project, since it specifies its owner as domain %(domain_id)s, " "but specifies a parent in a different domain (%(parent_domain_id)s)." msgstr "" "プロジェクトでその所有者をドメイン %(domain_id)s として指定しているが、別のド" "メイン (%(parent_domain_id)s) に親を指定しているため、そのプロジェクトを作成" "できません。" #, python-format msgid "" "Cannot create project, since its parent (%(domain_id)s) is acting as a " "domain, but project's specified parent_id (%(parent_id)s) does not match " "this domain_id." msgstr "" "プロジェクトの親 (%(domain_id)s) がドメインとして動作しているが、プロジェク" "トで指定される parent_id (%(parent_id)s) がこの domain_id と一致しないため、" "そのプロジェクトを作成できません。" msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "有効になっているドメインは削除できません。最初にそのドメインを無効にしてくだ" "さい。" #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "プロジェクト %(project_id)s はそのサブツリーに有効になっているプロジェクトが" "含まれているため削除できません。" #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "プロジェクト %s は階層内の末端ではないため、削除できません。サブツリー全体を" "削除する場合、カスケードオプションを使用してください。" #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." 
msgstr "" "プロジェクト %(project_id)s はそのサブツリーに有効になっているプロジェクトが" "含まれているため、無効にできません。" #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "親が無効になっているプロジェクト %s は有効にできません" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "グループから取得し、ユーザー ID でフィルター処理した割り当てをリストできませ" "ん。" msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "委任によって発行されたトークンを使用して要求トークンをリストすることはできま" "せん。" #, python-format msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s" msgstr "証明書 %(cert_file)s を開くことができません。理由: %(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "許可されていないロールを削除できません、%s" #, fuzzy msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "セルフの後に最初のパラメーターとしてヒントリストなしでドライバー呼び出しを切" "り捨てることはできません" msgid "Cannot update domain_id of a project that has children." msgstr "子を持つプロジェクトの domain_id を更新できません。" msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "問い合わせパラメーター parents_as_list と parents_as_ids を同時に使用すること" "はできません。" msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "問い合わせパラメーター subtree_as_list と subtree_as_ids を同時に使用すること" "はできません。" msgid "Cascade update is only allowed for enabled attribute." msgstr "カスケード更新は有効になっている属性にのみ許可されます。" #, fuzzy msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "有効フィルターとグループフィルターの組み合わせは常に空のリストになります。" #, fuzzy msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." 
msgstr "" "有効フィルター、ドメインフィルター、および継承フィルターの組み合わせは常に空" "のリストになります。" #, python-format msgid "Config API entity at /domains/%s/config" msgstr "/domains/%s/config の Config API エンティティー" #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "%(type)s を保存するときに競合が発生しました - %(details)s" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "矛盾するリージョン ID が指定されました: \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "コンシューマーが見つかりません" #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target %(target)s" msgstr "" "ターゲット %(target)s の変更不可の属性 '%(attributes)s' を変更できませんでし" "た" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "認証プロバイダー ID を判別できませんでした。設定オプション " "%(issuer_attribute)s が要求環境内で見つかりませんでした。" #, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "" "%(group_or_option)s がドメイン %(domain_id)s のドメイン設定に見つかりませんで" "した" #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "エンドポイントグループ %(endpoint_group_id)s が見つかりませんでした" msgid "Could not find Identity Provider identifier in environment" msgstr "Identity Provider ID が環境情報内に見つかりませんでした" #, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "ID プロバイダー %(idp_id)s が見つかりませんでした" #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "サービスプロバイダー %(sp_id)s が見つかりませんでした" #, python-format msgid "Could not find credential: %(credential_id)s" msgstr "クレデンシャルが見つかりませんでした: %(credential_id)s" #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "ドメイン %(domain_id)s が見つかりませんでした" #, python-format msgid "Could not find endpoint: %(endpoint_id)s" msgstr "エンドポイント %(endpoint_id)sが見つかりませんでした" #, python-format msgid "" "Could not find federated protocol 
%(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "" "Identity Provider の連携プロトコル %(protocol_id)s が見つかりませんでした: " "%(idp_id)s" #, python-format msgid "Could not find group: %(group_id)s" msgstr "グループ %(group_id)s が見つかりませんでした" #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "マッピング %(mapping_id)s が見つかりませんでした" msgid "Could not find policy association" msgstr "ポリシー関連付けが見つかりませんでした" #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "ポリシー %(policy_id)s が見つかりませんでした" #, python-format msgid "Could not find project: %(project_id)s" msgstr "プロジェクト %(project_id)s が見つかりませんでした" #, python-format msgid "Could not find region: %(region_id)s" msgstr "リージョン %(region_id)s が見つかりませんでした" #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" "ロール %(role_id)s を持つ割り当てが見つかりませんでした。ユーザーまたはグルー" "プは %(actor_id)s で、プロジェクトまたはドメインが %(target_id)s です" #, python-format msgid "Could not find role: %(role_id)s" msgstr "ロール %(role_id)s が見つかりませんでした" #, python-format msgid "Could not find service: %(service_id)s" msgstr "サービス %(service_id)s が見つかりませんでした" #, python-format msgid "Could not find token: %(token_id)s" msgstr "トークン %(token_id)s が見つかりませんでした" #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "トラスト %(trust_id)s が見つかりませんでした" #, python-format msgid "Could not find user: %(user_id)s" msgstr "ユーザー %(user_id)s が見つかりませんでした:" #, python-format msgid "Could not find version: %(version)s" msgstr "バージョン %(version)s が見つかりませんでした" #, python-format msgid "Could not find: %(target)s" msgstr "%(target)s が見つかりませんでした" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "フェデレーションしたユーザープロパティーのいずれも ID 値にマップすることがで" "きませんでした。デバッグログまたは追加の詳細に使用したマッピングを確認してく" "ださい。" msgid "" "Could not map user while setting ephemeral user identity. 
Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "一時的なユーザー ID の設定中にユーザーをマップすることができませんでした。" "マッピング規則によってユーザー ID/ユーザー名を指定するか、REMOTE_USER 環境変" "数を設定するか、いずれかを行う必要があります。" msgid "Could not validate the access token" msgstr "アクセストークンを検証できませんでした" msgid "Credential belongs to another user" msgstr "クレデンシャルが別のユーザーに属しています" msgid "Credential signature mismatch" msgstr "クレデンシャルのシグニチャーが一致しません" #, python-format msgid "" "Direct import of auth plugin %(name)r is deprecated as of Liberty in favor " "of its entrypoint from %(namespace)r and may be removed in N." msgstr "" "認証プラグイン %(name)r の直接インポートは、Liberty の時点で %(namespace)r の" "エンドポイントを選択したため、提供を終了しました。N では削除される予定です。" #, python-format msgid "" "Direct import of driver %(name)r is deprecated as of Liberty in favor of its " "entrypoint from %(namespace)r and may be removed in N." msgstr "" "ドライバー %(name)r の直接インポートは、Liberty の時点で %(namespace)r からの" "エントリーポイントを選択したため、 提供を終了しました。N では削除される予定で" "す。" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "「enable」属性が設定によって無視されているエンティティーを無効化中です。" #, python-format msgid "Domain (%s)" msgstr "ドメイン (%s)" #, python-format msgid "Domain cannot be named %s" msgstr "ドメインに %s という名前を付けることはできません" #, python-format msgid "Domain cannot have ID %s" msgstr "ドメインに %s という ID を付けることはできません" #, python-format msgid "Domain is disabled: %s" msgstr "ドメイン %s が無効になっています" msgid "Domain name cannot contain reserved characters." msgstr "ドメイン名に予約済み文字が含まれていてはなりません。" msgid "Domain scoped token is not supported" msgstr "ドメインをスコープにしたトークンはサポートされていません" msgid "Domain specific roles are not supported in the V8 role driver" msgstr "ドメイン固有のロールは、V8 のロールドライバーではサポートされません" #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." 
msgstr "" "ドメイン %(domain)s には既に定義された設定があります。ファイル %(file)s は無" "視されます。" msgid "Duplicate Entry" msgstr "重複する項目" #, python-format msgid "Duplicate ID, %s." msgstr "重複した ID、%s。" #, python-format msgid "Duplicate entry: %s" msgstr "重複する項目: %s" #, python-format msgid "Duplicate name, %s." msgstr "重複した名前、%s。" #, python-format msgid "Duplicate remote ID: %s" msgstr "重複するリモート ID: %s" msgid "EC2 access key not found." msgstr "EC2 アクセスキーが見つかりません。" msgid "EC2 signature not supplied." msgstr "EC2 の署名が提供されていません。" msgid "" "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set." msgstr "" "--bootstrap-password 引数または OS_BOOTSTRAP_PASSWORD いずれかを設定する必要" "があります。" msgid "Enabled field must be a boolean" msgstr "「有効」フィールドはブール値でなければなりません" msgid "Enabled field should be a boolean" msgstr "「有効」フィールドはブール値でなければなりません" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "" "エンドポイント %(endpoint_id)s がプロジェクト %(project_id)s に見つかりません" msgid "Endpoint Group Project Association not found" msgstr "エンドポイントグループとプロジェクトの関連付けが見つかりません" msgid "Ensure configuration option idp_entity_id is set." msgstr "設定オプション idp_entity_id が設定されていることを確認してください。" msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "" "設定オプション idp_sso_endpoint が設定されていることを確認してください。" #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." 
msgstr "" "ドメイン: %(domain)s、ファイル: %(file)s の設定ファイルの構文解析エラー。" #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "ファイル %(path)s のオープン中にエラーが発生しました: %(err)s" #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "行: '%(line)s' の解析中にエラーが発生しました: %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "ルール %(path)s の解析中にエラーが発生しました: %(err)s" #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "メタデータファイルの読み取り中にエラーが発生しました。%(reason)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "SQL ドライバーを使用するためのドメイン %(domain)s の登録の試行回数が制限を超" "過しました。最後に登録されたと思われるドメインは %(last_domain)s です。中断し" "ます" #, python-format msgid "Expected dict or list: %s" msgstr "期待される辞書またはリスト: %s" msgid "" "Expected signing certificates are not available on the server. Please check " "Keystone configuration." msgstr "" "想定された署名証明書がサーバーにありません。Keystone の設定を確認してくださ" "い。" #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "%(target)s に %(attribute)s があることが想定されています。要求の形式が不正も" "しくは正しくないため、サーバーは要求に応じることができませんでした。クライア" "ントでエラーが発生していると考えられます。" #, python-format msgid "Failed to start the %(name)s server" msgstr "%(name)s サーバーの起動に失敗しました" msgid "Failed to validate token" msgstr "トークンの検証に失敗しました" #, fuzzy msgid "Federation token is expired" msgstr "連合トークンの有効期限が切れています" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "フィールド \"remaining_uses\" は %(value)s になっていますが、トラストを再委任" "するにはこのフィールドが設定されていてはなりません" msgid "Found invalid token: scoped to both project and domain." 
msgstr "" "無効なトークンが見つかりました: スコープがプロジェクトとドメインの両方に対し" "て設定されています。" #, python-format msgid "Group %s not found in config" msgstr "グループ %s が設定内に見つかりません" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "ドメイン固有の設定ではグループ %(group)s はサポートされません" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "マッピング %(mapping_id)s が返したグループ %(group_id)s がバックエンドにあり" "ませんでした。" #, python-format msgid "" "Group membership across backend boundaries is not allowed, group in question " "is %(group_id)s, user is %(user_id)s" msgstr "" "バックエンド境界をまたぐグループメンバーシップは許可されていません。問題と" "なっているグループは %(group_id)s、ユーザーは %(user_id)s です" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "ID 属性 %(id_attr)s が LDAP オブジェクト %(dn)s に見つかりません" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "ID プロバイダー %(idp)s は無効になっています" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "受諾した ID の中に着信 ID プロバイダーの ID が含まれません。" msgid "Invalid EC2 signature." msgstr "無効な EC2 の署名。" #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "無効な LDAP TLS 証明書オプション %(option)s です。 %(options)s のいずれかを選" "択してください" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "無効な LDAP TLS_AVAIL オプション %s です。TLS が利用できません。" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "無効な LDAP deref オプション %(option)s です。%(options)s のいずれかを選択し" "てください" #, python-format msgid "Invalid LDAP scope: %(scope)s. 
Choose one of: %(options)s" msgstr "" "無効な LDAP スコープ %(scope)s です。 %(options)s のいずれかを選んでくださ" "い: " msgid "Invalid TLS / LDAPS combination" msgstr "無効な TLS / LDAPS の組み合わせです" #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "無効な監査情報データタイプ %(data)s (%(type)s) です" msgid "Invalid blob in credential" msgstr "クレデンシャル内の blob が無効です" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "無効なドメイン名 %(domain)s が設定ファイル名 %(file)s に見つかりました。この" "ファイルは無視されます。" #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "無効なドメイン固有の設定です: %(reason)s" #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "フィールド '%(path)s' の入力が無効です。値は '%(value)s' です。" #, fuzzy msgid "Invalid limit value" msgstr "制限値が無効です" #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service or " "Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s" msgstr "" "ポリシー関連付けのエンティティーの組み合わせが無効です。エンドポイント、サー" "ビス、または領域とサービスのみ許可されています。要求 - エンドポイント: " "%(endpoint_id)s、サービス: %(service_id)s、領域: %(region_id)s" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "無効なルール: %(identity_value)s。「グループ」と「ドメイン」の両方のキーワー" "ドを指定する必要があります。" msgid "Invalid signature" msgstr "シグニチャーが無効です" msgid "Invalid user / password" msgstr "ユーザー/パスワードが無効です" msgid "Invalid username or TOTP passcode" msgstr "無効なユーザー名または TOTP パスコード" msgid "Invalid username or password" msgstr "無効なユーザー名かパスワード" #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." 
msgstr "KVS 領域 %s は既に構成されています。再構成はできません。" #, python-format msgid "Key Value Store not configured: %s" msgstr "キーバリューストアが設定されていません: %s" #, python-format msgid "LDAP %s create" msgstr "LDAP %s の作成" #, python-format msgid "LDAP %s delete" msgstr "LDAP %s の削除" #, python-format msgid "LDAP %s update" msgstr "LDAP %s の更新" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "変換可能なリソース ID の長さは最大許容文字数である、64 文字より少なくなりま" "す。" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "マッピング %(mapping_id)s にあるローカルセクションは、存在しないリモートの一" "致 (例えばローカルセクションの {0}) を参照します。" #, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "キー %(target)s についてロックタイムアウトが発生しました" #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "" "ロックキーはターゲットキーと一致しなければなりません: %(lock)s != %(target)s" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "エンドポイント URL (%(endpoint)s) の形式が正しくありません。詳しくはエラーロ" "グを参照してください。" msgid "Marker could not be found" msgstr "マーカーが見つかりませんでした" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "%s ブランチに到達する最大の階層の深さ。" #, python-format msgid "Maximum lock attempts on %s occurred." msgstr "%s に対してロックが最大回数まで試みられました。" #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "メンバー %(member)s は既にグループ %(group)s のメンバーです" #, python-format msgid "Method not callable: %s" msgstr "メソッドが呼び出し可能ではありません: %s" msgid "Missing entity ID from environment" msgstr "環境情報にエンティティー ID が見つかりません" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "再委任時の「redelegation_count」の変更は禁止されています。このパラメーターは" "指定しないでください。" msgid "Multiple domains are not supported" msgstr "複数のドメインはサポートされていません" msgid "Must be called within an active lock context." 
msgstr "アクティブなロックコンテキスト内で呼び出されなければなりません。" msgid "Must specify either domain or project" msgstr "ドメインまたはプロジェクトのいずれかを指定する必要があります" msgid "Name field is required and cannot be empty" msgstr "「名前」フィールドは必須フィールドであり、空にできません" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "" "プロジェクトドメイン ID および プロジェクトドメイン名のいずれも指定されません" "でした。" msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, if " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "認可に使用するヘッダーが見つからず、OAuth 関連の呼び出しを続行できません。" "HTTPd または Apache の下で実行している場合は、WSGIPassAuthorization が On に" "設定されていることを確認してください。" msgid "No authenticated user" msgstr "認証されていないユーザー" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "暗号鍵が見つかりません。keystone-manage fernet_setup を実行して暗号鍵を初期設" "定します。" msgid "No options specified" msgstr "オプションが指定されていません" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "" "エンドポイント %(endpoint_id)s に関連付けられているポリシーはありません。" #, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "トラストはこれ以上使用できません: %(trust_id)s" msgid "No token in the request" msgstr "要求にトークンがありません" msgid "Non-default domain is not supported" msgstr "デフォルト以外のドメインはサポートされません" msgid "One of the trust agents is disabled or deleted" msgstr "トラストエージェントの 1 つが無効になっているか削除されています" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "ドメイン設定要求の検査中に、グループが指定されていないオプション %(option)s " "が見つかりました" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "ドメイン固有の設定ではグループ %(group)s のオプション %(option)s はサポートさ" "れていません" #, python-format msgid "Project (%s)" msgstr "プロジェクト (%s)" #, python-format msgid "Project ID not found: %(t_id)s" msgstr "プロジェクト ID が見つかりません: %(t_id)s" msgid "Project field is required and cannot be empty." 
msgstr "プロジェクトフィールドは必須であり、空にできません。" #, python-format msgid "Project is disabled: %s" msgstr "プロジェクト %s が無効になっています" msgid "Project name cannot contain reserved characters." msgstr "プロジェクト名に予約済み文字が含まれていてはなりません。" msgid "Query string is not UTF-8 encoded" msgstr "照会文字列は、UTF-8 でエンコードされていません" #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "グループ %(group)s のオプション %(option)s のデフォルトの読み取りはサポートさ" "れません" msgid "Redelegation allowed for delegated by trust only" msgstr "再委任はトラストによる委任にのみ許可されます" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "%(redelegation_depth)d の残りの再委任の深さが、許可された範囲 [0.." "%(max_count)d] を超えています" msgid "" "Remove admin_crud_extension from the paste pipeline, the admin_crud " "extension is now always available. Updatethe [pipeline:admin_api] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "admin_crud_extension を Paste のパイプラインから削除したため、admin_crud 拡張" "を常時使用できるようになりました。これは O リリースで削除される予定であるた" "め、それに応じて keystone-paste.ini 内の [pipeline:admin_api] セクションを更" "新してください。" msgid "" "Remove endpoint_filter_extension from the paste pipeline, the endpoint " "filter extension is now always available. Update the [pipeline:api_v3] " "section in keystone-paste.ini accordingly as it will be removed in the O " "release." msgstr "" "endpoint_filter_extension を Paste パイプラインから削除したため、エンドポイン" "トフィルター拡張を常時使用できるようになりました。これは O リリースで削除され" "る予定であるため、それに応じて keystone-paste.ini 内の [pipeline:api_v3] セク" "ションを更新してください。" msgid "" "Remove federation_extension from the paste pipeline, the federation " "extension is now always available. Update the [pipeline:api_v3] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." 
msgstr "" "federation_extension を Paste パイプラインから削除したため、フェデレーション" "拡張を常時使用できるようになりました。これは O リリースで削除される予定である" "ため、それに応じて keystone-paste.ini 内の [pipeline:api_v3] セクションを更新" "してください。" msgid "" "Remove oauth1_extension from the paste pipeline, the oauth1 extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "oauth1_extension を Paste パイプラインから削除したため、oauth1 拡張を常時使用" "できるようになりました。これは O リリースで削除される予定であるため、それに応" "じて keystone-paste.ini 内の [pipeline:api_v3] セクションを更新してくださ" "い。" msgid "" "Remove revoke_extension from the paste pipeline, the revoke extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "revoke_extension を Paste パイプラインから削除したため、取り消し拡張を常時使" "用できるようになりました。これは O リリースで削除される予定であるため、それに" "応じて keystone-paste.ini 内の [pipeline:api_v3] セクションを更新してくださ" "い。" msgid "" "Remove simple_cert from the paste pipeline, the PKI and PKIz token providers " "are now deprecated and simple_cert was only used insupport of these token " "providers. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "simple_cert を Paste パイプラインから削除したため、PKI および PKIz のトークン" "プロバイダーは非推奨となりました。これらのトークンプロバイダーのサポートに使" "用されていたのは simple_cert のみでした。これは O リリースで削除される予定で" "あるため、それに応じて keystone-paste.ini 内の [pipeline:api_v3] セクションを" "更新してください。" msgid "" "Remove user_crud_extension from the paste pipeline, the user_crud extension " "is now always available. Updatethe [pipeline:public_api] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." 
msgstr "" "user_crud_extension を Paste パイプラインから削除したため、user_crud 拡張を常" "時使用できるようになりました。 これは O リリースで削除される予定であるため、" "それに応じて keystone-paste.ini 内の [pipeline:public_api] セクションを更新し" "てください。" msgid "Request Token does not have an authorizing user id" msgstr "要求されたトークンに許可ユーザー ID が含まれていません" #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. The " "server could not comply with the request because the attribute size is " "invalid (too large). The client is assumed to be in error." msgstr "" "要求された属性 %(attribute)s のサイズは %(size)i 以下でなければなりません。属" "性のサイズが無効である (大きすぎる) ため、サーバーは要求に応じることができま" "せんでした。クライアントでエラーが発生していると考えられます。" #, fuzzy msgid "Request must have an origin query parameter" msgstr "要求には起点照会パラメーターが必要です" msgid "Request token is expired" msgstr "要求トークンの有効期限が切れています" msgid "Request token not found" msgstr "要求されたトークンが見つかりません" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "要求された有効期限は再委任されたトラストが提供可能な期間を超えています" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "要求された再委任の深さ %(requested_count)d が、許可された上限 %(max_count)d " "を超えています" msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of running " "in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will " "be removed in the \"M\"-Release." 
msgstr "" "eventlet を介した keystone の実行は Kilo 以降では推奨されておらず、WSGI サー" "バー (mod_wsgi など) での実行が推奨されています。eventlet 下での keystone の" "サポートは「M」リリースで削除される予定です。" msgid "Scoping to both domain and project is not allowed" msgstr "ドメインとプロジェクトの両方にスコープを設定することはできません" msgid "Scoping to both domain and trust is not allowed" msgstr "ドメインとトラストの両方にスコープを設定することはできません" msgid "Scoping to both project and trust is not allowed" msgstr "プロジェクトとトラストの両方にスコープを設定することはできません" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "サービスプロバイダー %(sp)s は無効になっています" msgid "Some of requested roles are not in redelegated trust" msgstr "要求されたロールの一部が再委任されたトラスト内にありません" msgid "Specify a domain or project, not both" msgstr "ドメインかプロジェクトを指定してください。両方は指定しないでください" msgid "Specify a user or group, not both" msgstr "ユーザーかグループを指定してください。両方は指定しないでください" msgid "Specify one of domain or project" msgstr "ドメインまたはプロジェクトのいずれかを指定してください" msgid "Specify one of user or group" msgstr "ユーザーまたはグループのいずれかを指定してください" #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "文字列が長過ぎます。文字列 %(string)s' の長さが列 %(type)s(CHAR(%(length)d)) " "の制限を超えました。" msgid "Tenant name cannot contain reserved characters." msgstr "テナント名に予約済み文字が含まれていてはなりません。" #, python-format msgid "" "The %s extension has been moved into keystone core and as such its " "migrations are maintained by the main keystone database control. Use the " "command: keystone-manage db_sync" msgstr "" "%s 拡張が keystone コアに移動されているため、そのマイグレーションはメインの " "keystone データベース制御によって維持されます。次のコマンドを使用します: " "keystone-manage db_sync" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." 
msgstr "" "'expires_at' は現時点以前であってはなりません。要求の形式が誤っているか、要求" "が正しくないために、サーバーはこの要求に応じることが出来ませんでした。クライ" "アントでエラーが発生していると考えられます。" msgid "The --all option cannot be used with the --domain-name option" msgstr "--all オプションを --domain-name オプションと併用することはできません" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "Keystone 設定ファイル %(config_file)s が見つかりませんでした。" #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "keystone ドメイン固有設定で複数の SQL ドライバーが指定されています (1 つしか" "指定できません): %(source)s。" msgid "The action you have requested has not been implemented." msgstr "要求したアクションは実装されていません。" #, fuzzy msgid "The authenticated user should match the trustor." msgstr "認証ユーザーは委託者と一致している必要があります。" msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." msgstr "" "要求された証明書がありません。このサーバーでは PKI トークンが使用されていない" "か、そうでない場合は設定が間違っていると考えられます。 " msgid "The configured token provider does not support bind authentication." msgstr "設定済みトークンプロバイダーはバインド認証をサポートしません。" msgid "The creation of projects acting as domains is not allowed in v2." msgstr "v2 では、ドメインとして動作するプロジェクトの作成は許可されません。" #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "パスワードの長さは %(size)i 以下でなければなりません。パスワードが無効である" "ため、サーバーは要求に応じることができませんでした。" msgid "The request you have made requires authentication." msgstr "実行された要求には認証が必要です。" msgid "The resource could not be found." msgstr "リソースが見つかりませんでした。" msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." 
msgstr "" "取り消し呼び出しに domain_id と project_id の両方を使用することはできません。" "これは、Keystone サーバーにおけるバグです。現在の要求は打ち切られます。" msgid "The service you have requested is no longer available on this server." msgstr "要求したサービスは現在このサーバーでは使用できません。" #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "指定された親リージョン %(parent_region_id)s では、リージョン階層構造でループ" "が発生してしまいます。" #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "設定で指定されたグループ %(group)s の値はオプションの辞書にする必要があります" msgid "There should not be any non-oauth parameters" msgstr "oauth 関連以外のパラメーターが含まれていてはいけません" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "これは認識可能な Fernet ペイロードバージョンではありません: %s" #, python-format msgid "This is not a recognized Fernet token %s" msgstr "これは認識可能な Fernet トークン %s ではありません" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "タイムスタンプが想定された形式になっていません。要求の形式が不正もしくは正し" "くないため、サーバーは要求に応じることができませんでした。クライアントでエ" "ラーが発生していると考えられます。" #, python-format msgid "" "To get a more detailed information on this error, re-run this command for " "the specific domain, i.e.: keystone-manage domain_config_upload --domain-" "name %s" msgstr "" "このエラーに関する詳細を得るには、特定ドメインに対してこのコマンドを再実行し" "てください: keystone-manage domain_config_upload --domain-name %s" msgid "Token belongs to another user" msgstr "トークンが別のユーザーに属しています" msgid "Token does not belong to specified tenant." msgstr "トークンが指定されたテナントに所属していません。" msgid "Token version is unrecognizable or unsupported." msgstr "トークンバージョンが認識できないかサポートされません。" #, fuzzy msgid "Trustee has no delegated roles." msgstr "受託者に委任された役割がありません。" #, fuzzy msgid "Trustor is disabled." 
msgstr "委託者は無効です。" #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "グループ %(group)s を更新しようとしていますが、その場合は設定でグループのみを" "指定する必要があります" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "グループ %(group)s のオプション %(option)s を更新しようとしましたが、指定され" "た設定には代わりにオプション %(option_other)s が含まれています" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "グループ %(group)s のオプション %(option)s を更新しようとしていますが、その場" "合は設定でオプションのみを指定する必要があります" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "keystone データベースにアクセスできません。このデータベースが正しく設定されて" "いるかどうかを確認してください。" #, fuzzy, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." msgstr "トラスト %(trust_id)s を消費できず、ロックを取得できません。" #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "リージョン %(region_id)s またはその子リージョンがエンドポイントに関連付けられ" "ているため、このリージョンを削除できません。" msgid "Unable to downgrade schema" msgstr "スキーマをダウングレードすることができません" #, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "" "マッピング %(mapping_id)s を使用する際に、有効なグループが見つかりませんでし" "た" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "ドメイン設定ディレクトリーが見つかりません: %s" #, python-format msgid "Unable to lookup user %s" msgstr "ユーザー %s を検索できません" #, fuzzy, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "ID 属性 %(attribute)s に競合する値 %(new)s と %(old)s が含まれているため、調" "整できません" #, python-format msgid "" "Unable to sign SAML assertion. 
It is likely that this server does not have " "xmlsec1 installed, or this is the result of misconfiguration. Reason " "%(reason)s" msgstr "" "SAML アサーションに署名できません。このサーバーに xmlsec1 がインストールされ" "ていないか、設定が誤っているためと考えられます。理由: %(reason)s" msgid "Unable to sign token." msgstr "トークンに署名できません。" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "予期しない割り当てタイプが検出されました。%s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" "認可属性 の組み合わせ (ユーザー: %(user_id)s、グループ: %(group_id)s、プロ" "ジェクト: %(project_id)s、ドメイン: %(domain_id)s) が正しくありません。" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "JSON Home 応答に対して予期しない状況が要求されました。%s" msgid "Unknown Target" msgstr "不明なターゲット" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "不明なドメイン '%(name)s' が --domain-name で指定されました" #, python-format msgid "Unknown token version %s" msgstr "トークンバージョン %s は不明です" #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "未登録の依存関係: %(targets)s に対する %(name)s" msgid "Update of `domain_id` is not allowed." msgstr "`domain_id` の更新は許可されていません。" msgid "Update of `is_domain` is not allowed." msgstr "`is_domain` の更新は許可されません。" msgid "Update of `parent_id` is not allowed." msgstr "\"parent_id\" の更新は許可されていません。" msgid "Update of domain_id is only allowed for root projects." msgstr "domain_id の更新が許可されるのは root プロジェクトのみです。" msgid "Update of domain_id of projects acting as domains is not allowed." 
msgstr "" "ドメインとして動作するプロジェクトの domain_id の更新は許可されません。" msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "" "SAML アサーションの作成を行うときは、プロジェクトにスコープが設定されたトーク" "ンを使用してください" msgid "" "Use of the identity driver config to automatically configure the same " "assignment driver has been deprecated, in the \"O\" release, the assignment " "driver will need to be expicitly configured if different than the default " "(SQL)." msgstr "" "同一の割り当てドライバーを自動的に設定するための ID ドライバー設定の使用は、" "提供を終了しました。 \"O\" リリースでは、デフォルト (SQL) 以外の場合は割り当" "てドライバーを明示的に設定する必要があります。" #, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "ユーザー %(u_id)s はテナント %(t_id)s のアクセス権限がありません。" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "" "ユーザー %(user_id)s はドメイン %(domain_id)s へのアクセス権限がありません" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "" "ユーザー %(user_id)s はプロジェクト %(project_id)s へのアクセス権限がありませ" "ん" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "ユーザー %(user_id)s はすでにグループ %(group_id)s のメンバーです" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "ユーザー '%(user_id)s' がグループ '%(group_id)s' で見つかりません" msgid "User IDs do not match" msgstr "ユーザー ID が一致しません" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "ユーザー ID、ドメイン ID が指定されたユーザー名、ドメイン名が指定されたユー" "ザー名のいずれかが欠落しているため、ユーザー認証を作成できません。" #, python-format msgid "User is disabled: %s" msgstr "ユーザーが無効になっています: %s" msgid "User is not a member of the requested project" msgstr "ユーザーは、要求されたプロジェクトのメンバーではありません" #, fuzzy msgid "User is not a trustee." msgstr "ユーザーは受託者ではありません。" msgid "User not found" msgstr "ユーザーが見つかりません" msgid "User not valid for tenant." 
msgstr "ユーザーはテナントに対して無効です。" msgid "User roles not supported: tenant_id required" msgstr "ユーザーロールがサポートされません: tenant_id が必要です" #, fuzzy, python-format msgid "User type %s not supported" msgstr "ユーザータイプ %s はサポートされていません" msgid "You are not authorized to perform the requested action." msgstr "要求されたアクションを実行する許可がありません。" #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "要求されたアクションを実行する許可がありません: %(action)s" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "管理トークンを使用してリソースを作成しようとしています。このトークンはドメイ" "ン内にないため、このリソースが属するドメインを明示的に含める必要があります。" msgid "`key_mangler` functions must be callable." msgstr "`key_mangler` 関数は呼び出し可能でなければなりません。" msgid "`key_mangler` option must be a function reference" msgstr "`key_mangler` オプションは関数参照でなければなりません" msgid "any options" msgstr "任意のオプション" msgid "auth_type is not Negotiate" msgstr "auth_type はネゴシエートではありません" msgid "authorizing user does not have role required" msgstr "ユーザーを認可するのに必要なロールがありません" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "無効になっているプロジェクトを含むブランチにプロジェクトを作成することはでき" "ません: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "ドメインとして動作する有効になっているプロジェクトを削除できません。最初にプ" "ロジェクト %s を無効にしてください。" #, python-format msgid "group %(group)s" msgstr "グループ %(group)s" msgid "" "idp_contact_type must be one of: [technical, other, support, administrative " "or billing." 
msgstr "" "idp_contact_type は technical、other、support、administrative、billing のいず" "れかでなければなりません。" #, python-format msgid "invalid date format %s" msgstr "日付形式 %s は無効です" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "ドメインとして動作する同じ名前の 2 つのプロジェクトが存在することは許可されま" "せん: %s" #, python-format msgid "" "it is not permitted to have two projects within a domain with the same " "name : %s" msgstr "" "1 つのドメイン内に同じ名前の 2 つのプロジェクトが存在することは許可されませ" "ん : %s" msgid "only root projects are allowed to act as domains." msgstr "ドメインとして動作することが許可されるのは root プロジェクトのみです。" #, python-format msgid "option %(option)s in group %(group)s" msgstr "グループ %(group)s のオプション %(option)s" msgid "provided consumer key does not match stored consumer key" msgstr "" "指定されたコンシューマー鍵は保存されているコンシューマー鍵と一致しません" msgid "provided request key does not match stored request key" msgstr "指定された要求鍵は保管されている要求鍵と一致しません" #, fuzzy msgid "provided verifier does not match stored verifier" msgstr "指定されたベリファイヤーは保管済みベリファイヤーと一致しません" msgid "remaining_uses must be a positive integer or null." 
msgstr "remaining_uses は正整数またはヌルでなければなりません。" msgid "remaining_uses must not be set if redelegation is allowed" msgstr "再委任が許可されている場合は remaining_uses を設定してはなりません" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "グループ %(group)s の更新を要求しましたが、指定された設定には代わりにグルー" "プ %(group_other)s が含まれています" msgid "rescope a scoped token" msgstr "スコープが設定されたトークンのスコープを設定し直します" #, python-format msgid "role %s is not defined" msgstr "ロール %s は定義されていません" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "include_subtree も指定される場合、scope.project.id を指定する必要があります。" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "" "tls_cacertdir %s が見つからない、もしくは、ディレクトリではありません。" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s が見つからない、もしくは、ファイルではありません。" #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "トークン参照は KeystoneToken 型である必要があります。%s を受信しました" msgid "" "update of domain_id is deprecated as of Mitaka and will be removed in O." msgstr "" "domain_id の更新は Mitaka の時点で提供を終了し、O で削除される予定です。" #, python-format msgid "" "validated expected to find %(param_name)r in function signature for " "%(func_name)r." msgstr "" "検証され、%(func_name)r の関数のシグニチャーで %(param_name)r が見つかること" "が予期されます" keystone-9.0.0/keystone/locale/pt_BR/0000775000567000056710000000000012701407246020633 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/pt_BR/LC_MESSAGES/0000775000567000056710000000000012701407246022420 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000157112701407102027163 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "Unable to open template file %s" msgstr "Não é possível abrir o arquivo de modelo %s" keystone-9.0.0/keystone/locale/pt_BR/LC_MESSAGES/keystone.po0000664000567000056710000015677012701407105024633 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Gabriel Wainer, 2013 # Gabriel Wainer, 2013 # Lucas Ribeiro , 2014 # Volmar Oliveira Junior , 2013 # Volmar Oliveira Junior , 2013 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Raildo Mascena , 2015. #zanata # Carlos Marques , 2016. #zanata # Lucas Palm , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev5\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-21 10:57+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-21 02:57+0000\n" "Last-Translator: Carlos Marques \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "%(driver)s is not supported driver version" msgstr "O %(driver)s não é uma versão de driver suportada" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "" "O nome %(entity)s não pode conter os caracteres reservados a seguir: " "%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s não é um evento de notificação válido, deve ser um de: %(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s não é um host do painel confiável" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s não fornece migrações de banco de dados. O caminho do " "repositório de migração %(path)s não existe ou não é um diretório." #, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s" msgstr "%(prior_role_id)s não implica %(implied_role_id)s" #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." msgstr "%(property_name)s não pode ter menos de %(min_length)s caracteres." 
#, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "%(property_name)s não é um %(display_expected_type)s" #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." msgstr "%(property_name)s não deve ter mais de %(max_length)s caracteres." #, python-format msgid "%(role_id)s cannot be an implied roles" msgstr "%(role_id)s não pode ser uma função implícita" #, python-format msgid "%s cannot be empty." msgstr "%s não pode estar vazio." #, python-format msgid "%s extension does not exist." msgstr "Extensão %s não existe." #, python-format msgid "%s field is required and cannot be empty" msgstr "campo %s é obrigatório e não pode estar vazio" #, python-format msgid "%s field(s) cannot be empty" msgstr "%s campo(s) não podem estar vazios" #, python-format msgid "" "%s for the LDAP identity backend has been deprecated in the Mitaka release " "in favor of read-only identity LDAP access. It will be removed in the \"O\" " "release." msgstr "" "%s para o backend de identidade LDAP foi descontinuado na liberação do " "Mitaka a favor do acesso LDAP de identidade somente leitura. Ele será " "removido na liberação \"O\"." msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(Desative o modo insecure_debug para suprimir esses detalhes)." msgid "--all option cannot be mixed with other options" msgstr "A opção --all não pode ser combinada com outras opções" msgid "A project-scoped token is required to produce a service catalog." msgstr "" "Um token de projeto com escopo é necessário para produzir um catálogo de " "serviços." msgid "Access token is expired" msgstr "Token de acesso expirou" msgid "Access token not found" msgstr "Token de acesso não encontrado" msgid "Additional authentications steps required." msgstr "Passos de autenticação adicionais requeridos." 
msgid "An unexpected error occurred when retrieving domain configs" msgstr "Ocorreu um erro inesperado ao recuperar as configurações de domínio" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "Ocorreu um erro inesperado ao tentar armazenar %s" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "Um erro inesperado evitou que o servidor cumprisse sua solicitação." #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "" "Um erro inesperado evitou que o servidor cumprisse sua solicitação: " "%(exception)s" msgid "An unhandled exception has occurred: Could not find metadata." msgstr "Uma exceção não tratada ocorreu: Não foi possível encontrar metadados." msgid "At least one option must be provided" msgstr "Pelo menos uma opção deve ser fornecida" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "Pelo menos uma opção deve ser fornecida, use --all ou --domain-name" msgid "At least one role should be specified." msgstr "Pelo menos uma função deve ser especificada." #, python-format msgid "" "Attempted automatic driver selection for assignment based upon " "[identity]\\driver option failed since driver %s is not found. Set " "[assignment]/driver to a valid driver in keystone config." msgstr "" "Uma tentativa de seleção de driver automática para designação com base na " "opção [identity]\\driver falhou porque o driver %s não foi localizado. " "Configure o [assignment]/driver para um driver válido na configuração do " "keystone." msgid "Attempted to authenticate with an unsupported method." msgstr "Tentativa de autenticação com um método não suportado." msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "" "Ao tentar usar o token OS-FEDERATION com Serviço de identidade V2, use " "autenticação V3" msgid "Authentication plugin error." 
msgstr "Erro do plugin de autenticação." #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "" "Backend `%(backend)s`não é um memcached backend válido. Backends válidos: " "%(backend_list)s" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "" "Não é possível autorizar um token de solicitação com um token emitido por " "meio de delegação." #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "Não é possível alterar %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "Não é possível alterar o ID do Domínio" msgid "Cannot change user ID" msgstr "Não é possível alterar o ID do usuário" msgid "Cannot change user name" msgstr "Não é possível alterar o nome de usuário" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "Não é possível criar um endpoint com uma URL inválida: %(url)s" #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "Não é possível criar o projeto com o pai: %(project_id)s" #, python-format msgid "" "Cannot create project, since it specifies its owner as domain %(domain_id)s, " "but specifies a parent in a different domain (%(parent_domain_id)s)." msgstr "" "Não é possível criar o projeto porque ele especifica seu proprietário como " "domínio %(domain_id)s, mas especifica um pai em um domínio diferente " "(%(parent_domain_id)s)." #, python-format msgid "" "Cannot create project, since its parent (%(domain_id)s) is acting as a " "domain, but project's specified parent_id (%(parent_id)s) does not match " "this domain_id." msgstr "" "Não é possível criar um projeto porque seu pai (%(domain_id)s) está agindo " "como um domínio, mas o parent_id (%(parent_id)s) especificado do projeto não " "corresponde com esse domain_id." msgid "Cannot delete a domain that is enabled, please disable it first." 
msgstr "" "Não é possível excluir um domínio que esteja ativado, desative-o primeiro." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Não é possível excluir o projeto %(project_id)s porque sua subárvore contém " "projetos ativados." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "Não é possível excluir o projeto %s porque ele não é uma folha na " "hierarquia. Use a opção em cascata se desejar excluir uma subárvore inteira." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Não é possível desativar o projeto %(project_id)s porque sua subárvore " "contém projetos ativados." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "Não é possível ativar o projeto %s porque ele possui pais desativados" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "Não é possível listar designações originadas a partir de grupos e filtradas " "pelo ID do usuário." msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "Não é possível listar os tokens de solicitação com um token emitido por meio " "de delegação." #, python-format msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s" msgstr "Não é possível abrir o certificado %(cert_file)s. Motivo: %(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Não é possível remover role que não foi concedido, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "Não é possível truncar uma chamada de driver sem lista de sugestões como " "primeiro parâmetro após self " msgid "Cannot update domain_id of a project that has children."
msgstr "Não é possível atualizar domain_id de um projeto que possua filhos." msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "Não é possível usar parâmetros de consulta parents_as_list e parents_as_ids " "ao mesmo tempo." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "Não é possível usar parâmetros de consulta subtree_as_list e subtree_as_ids " "ao mesmo tempo." msgid "Cascade update is only allowed for enabled attribute." msgstr "A atualização em cascata é permitida somente para atributo ativado." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "Combinar efetivo e filtro de grupo sempre resultará em uma lista vazia." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "Combinar efetivo, domínio e filtros herdados sempre resultará em uma lista " "vazia." #, python-format msgid "Config API entity at /domains/%s/config" msgstr "Entidade de API de configuração em /domains/%s/config" #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "Ocorreu um conflito ao tentar armazenar %(type)s -%(details)s" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "IDs de região de conflito especificados: \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "Consumidor não encontrado" #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target %(target)s" msgstr "" "Não foi possível alterar o atributo imutável '%(attributes)s' no destino " "%(target)s" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "Não foi possível determinar o ID do Provedor de Identidade. 
A opção de " "configuração %(issuer_attribute)s não foi encontrada no ambiente da " "solicitação." #, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "" "Não foi possível localizar %(group_or_option)s na configuração de domínio " "para o domínio %(domain_id)s" #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "Não foi possível localizar o Grupo do Terminal: %(endpoint_group_id)s" msgid "Could not find Identity Provider identifier in environment" msgstr "" "Não foi possível localizar o identificador do Provedor de Identidade no " "ambiente" #, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "Não foi possível localizar o Provedor de Identidade: %(idp_id)s" #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "Não foi possível localizar o Provedor de Serviços: %(sp_id)s" #, python-format msgid "Could not find credential: %(credential_id)s" msgstr "Não foi possível localizar a credencial: %(credential_id)s" #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "Não foi possível localizar o domínio: %(domain_id)s" #, python-format msgid "Could not find endpoint: %(endpoint_id)s" msgstr "Não foi possível localizar terminal: %(endpoint_id)s" #, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "" "Não foi possível localizar o protocolo federado %(protocol_id)s para o " "Provedor de Identidade: %(idp_id)s" #, python-format msgid "Could not find group: %(group_id)s" msgstr "Não foi possível localizar o grupo: %(group_id)s" #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "Não foi possível localizar o mapeamento: %(mapping_id)s" msgid "Could not find policy association" msgstr "Não foi possível localizar a associação de política" #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "Não foi possível 
localizar a política: %(policy_id)s" #, python-format msgid "Could not find project: %(project_id)s" msgstr "Não foi possível localizar o projeto: %(project_id)s" #, python-format msgid "Could not find region: %(region_id)s" msgstr "Não foi possível localizar a região: %(region_id)s" #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" "Não foi possível localizar a designação de função com a função: %(role_id)s, " "usuário ou grupo: %(actor_id)s, projeto ou domínio: %(target_id)s" #, python-format msgid "Could not find role: %(role_id)s" msgstr "Não foi possível localizar a função: %(role_id)s" #, python-format msgid "Could not find service: %(service_id)s" msgstr "Não foi possível localizar o serviço: %(service_id)s" #, python-format msgid "Could not find token: %(token_id)s" msgstr "Não foi possível localizar o token: %(token_id)s" #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "Não foi possível localizar a confiança: %(trust_id)s" #, python-format msgid "Could not find user: %(user_id)s" msgstr "Não foi possível localizar o usuário: %(user_id)s" #, python-format msgid "Could not find version: %(version)s" msgstr "Não foi possível localizar a versão: %(version)s" #, python-format msgid "Could not find: %(target)s" msgstr "Não foi possível localizar: %(target)s" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "Não foi possível mapear nenhuma propriedade do usuário federado para valores " "de identidade. Verifique os logs de depuração ou o mapeamento usado para " "obter detalhes adicionais" msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." 
msgstr "" "Não foi possível mapear o usuário ao configurar a identidade do usuário " "efêmera. Regras de mapeamento devem especificar o ID/nome do usuário ou a " "variável de ambiente REMOTE_USER deve ser configurada." msgid "Could not validate the access token" msgstr "Não foi possível validar o token de acesso" msgid "Credential belongs to another user" msgstr "A credencial pertence à outro usuário" msgid "Credential signature mismatch" msgstr "Incompatibilidade de assinatura de credencial" #, python-format msgid "" "Direct import of auth plugin %(name)r is deprecated as of Liberty in favor " "of its entrypoint from %(namespace)r and may be removed in N." msgstr "" "A importação direta de um plug-in de autoria %(name)r foi descontinuada a " "partir do Liberty a favor de seu ponto de entrada de %(namespace)r e pode " "ser removida no N." #, python-format msgid "" "Direct import of driver %(name)r is deprecated as of Liberty in favor of its " "entrypoint from %(namespace)r and may be removed in N." msgstr "" "A importação direta de um driver %(name)r foi descontinuada a partir do " "Liberty a favor de seu ponto de entrada de %(namespace)r e pode ser removida " "no N." msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "A desativação de uma entidade em que o atributo ‘enable' é ignorado pelo " "configuração." #, python-format msgid "Domain (%s)" msgstr "Domínio (%s)" #, python-format msgid "Domain cannot be named %s" msgstr "O domínio não pode ser chamado %s" #, python-format msgid "Domain cannot have ID %s" msgstr "O domínio não pode ter o ID de %s" #, python-format msgid "Domain is disabled: %s" msgstr "O domínio está desativado: %s" msgid "Domain name cannot contain reserved characters." msgstr "O nome do domínio não pode conter caracteres reservados." 
msgid "Domain scoped token is not supported" msgstr "O token de escopo de domínio não é suportado" msgid "Domain specific roles are not supported in the V8 role driver" msgstr "" "Funções específicas de domínio não são suportadas no driver de função da V8" #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "Domínio: %(domain)s já possui uma configuração definida - ignorando arquivo: " "%(file)s." msgid "Duplicate Entry" msgstr "Entrada Duplicada" #, python-format msgid "Duplicate ID, %s." msgstr "ID duplicado, %s." #, python-format msgid "Duplicate entry: %s" msgstr "Entrada duplicada: %s" #, python-format msgid "Duplicate name, %s." msgstr "Nome duplicado, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "ID remoto duplicado: %s" msgid "EC2 access key not found." msgstr "Chave de acesso EC2 não encontrada." msgid "EC2 signature not supplied." msgstr "assinatura EC2 não fornecida." msgid "" "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set." msgstr "" "O argumento de senha de inicialização ou OS_BOOTSTRAP_PASSWORD deve ser " "configurado." msgid "Enabled field must be a boolean" msgstr "Campo habilitado precisa ser um booleano" msgid "Enabled field should be a boolean" msgstr "Campo habilitado deve ser um booleano" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Endpoint %(endpoint_id)s não encontrado no projeto %(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "Associação de Projeto do Grupo do Terminal não localizada" msgid "Ensure configuration option idp_entity_id is set." msgstr "Assegure que a opção de configuração idp_entity_id esteja definida." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "Assegure que a opção de configuração idp_sso_endpoint esteja definida." 
#, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Erro ao analisar o arquivo de configuração para o domínio: %(domain)s, " "arquivo: %(file)s." #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Erro ao abrir arquivo %(path)s: %(err)s" #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "Erro ao analisar a linha %(line)s: %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Erro ao analisar regras %(path)s: %(err)s" #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "Erro ao ler arquivo de metadados, %(reason)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Tentativas de registrar o domínio %(domain)s para usar SQL driver excederam, " "o ultimo domínio que parece ter tido foi %(last_domain)s, desistindo" #, python-format msgid "Expected dict or list: %s" msgstr "Esperado dict ou list: %s" msgid "" "Expected signing certificates are not available on the server. Please check " "Keystone configuration." msgstr "" "Certificados de assinatura esperados não estão disponíveis no servidor. " "Verifique configuração de Keystone." #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "Esperando localizar %(attribute)s em %(target)s - o servidor não pôde " "obedecer à solicitação porque ela está malformada ou de alguma maneira " "incorreta. O cliente deve estar em erro." 
#, python-format msgid "Failed to start the %(name)s server" msgstr "Falha ao iniciar o servidor do %(name)s" msgid "Failed to validate token" msgstr "Falha ao validar token" msgid "Federation token is expired" msgstr "O token de federação está expirado" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "O campo \"remaining_uses\" está configurado como %(value)s enquanto ele não " "deve ser configurado para delegar novamente uma confiança" msgid "Found invalid token: scoped to both project and domain." msgstr "Token inválido encontrado: escopo para ambos o projeto e o domínio." #, python-format msgid "Group %s not found in config" msgstr "Grupo %s não localizado na configuração" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "O grupo %(group)s não é suportado para configurações específicas do domínio" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "Grupo %(group_id)s retornou mapeando %(mapping_id)s não foi localizado no " "backend." #, python-format msgid "" "Group membership across backend boundaries is not allowed, group in question " "is %(group_id)s, user is %(user_id)s" msgstr "" "Associação ao grupo pelos limites de backend não é permitida, o grupo em " "questão é %(group_id)s, o usuário é %(user_id)s" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "Atributo do ID %(id_attr)s não localizado no objeto LDAP %(dn)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "O Provedor de Identidade %(idp)s está desativado" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" "O identificador do provedor de identidade recebido não está incluído entre " "os identificadores aceitos." msgid "Invalid EC2 signature." 
msgstr "Assinatura EC2 inválida." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "Opção de certificado LDAP TLS inválida: %(option)s. Escolha uma de: " "%(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Opção LDAP TLS_AVAIL inválida: %s. TLS não disponível" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "Opção deref LDAP inválida: %(option)s. Escolha uma destas: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "Escopo LDAP inválido: %(scope)s. Escolha um de: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Combinação TLS / LDAPS inválida" #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "" "Tipo de dados de informações de auditoria inválido: %(data)s (%(type)s)" msgid "Invalid blob in credential" msgstr "BLOB inválido na credencial" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Nome de domínio inválido: %(domain)s localizado no nome do arquivo de " "configuração: %(file)s - ignorando este arquivo." #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "Configuração específica de domínio inválida: %(reason)s" #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "Entrada inválida para o campo '%(path)s'. O valor é '%(value)s'." msgid "Invalid limit value" msgstr "Valor limite inválido" #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service or " "Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s" msgstr "" "Combinação de entidades inválida para associação de política - somente " "Terminal, Serviço ou Região+Serviço permitido. 
A solicitação foi - Terminal: " "%(endpoint_id)s, Serviço: %(service_id)s, Região: %(region_id)s" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Regra inválida: %(identity_value)s. As palavras-chave 'groups' e 'domain' " "devem ser especificadas." msgid "Invalid signature" msgstr "Assinatura inválida" msgid "Invalid user / password" msgstr "Usuário / senha inválido" msgid "Invalid username or TOTP passcode" msgstr "Nome de usuário ou passcode TOTP inválido" msgid "Invalid username or password" msgstr "Nome de usuário ou senha inválidos" #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." msgstr "Região KVS %s já está configurado. Não é possível reconfigurar." #, python-format msgid "Key Value Store not configured: %s" msgstr "Armazenamento do Valor da Chave não configurado: %s" #, python-format msgid "LDAP %s create" msgstr "Criação de LDAP %s" #, python-format msgid "LDAP %s delete" msgstr "Exclusão de LDAP %s" #, python-format msgid "LDAP %s update" msgstr "Atualização de LDAP %s" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "O comprimento do recurso transformável id > 64, que é o máximo de caracteres " "permitidos" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "A seção local no mapeamento %(mapping_id)s refere-se a uma correspondência " "remota que não existe (por exemplo, {0} em uma seção local)." #, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "Ocorreu um tempo limite de bloqueio para a chave, %(target)s" #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "" "Chave de bloqueio deve corresponder à chave de destino: %(lock)s !=%(target)s" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." 
msgstr "" "URL de endpoint mal-formada (%(endpoint)s), veja o log de ERROS para " "detalhes." msgid "Marker could not be found" msgstr "Marcador não pôde ser encontrado" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "Profundidade máx. de hierarquia atingida para a ramificação %s." #, python-format msgid "Maximum lock attempts on %s occurred." msgstr "Máximo de tentativas de bloqueio em %s ocorreu." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "O membro %(member)s já é membro do grupo %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Método não pode ser chamado: %s" msgid "Missing entity ID from environment" msgstr "ID da entidade ausente a partir do ambiente" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "A modificação de \"redelegation_count\" é proibida. É recomendado omitir " "este parâmetro." msgid "Multiple domains are not supported" msgstr "Múltiplos domínios não são suportados" msgid "Must be called within an active lock context." msgstr "Deve ser chamado dentro de um contexto de bloqueio ativo." msgid "Must specify either domain or project" msgstr "Deve especificar o domínio ou projeto" msgid "Name field is required and cannot be empty" msgstr "Campo nome é requerido e não pode ser vazio" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "" "Nem o ID do Domínio do Projeto nem o Nome do Domíno do Projeto foram " "fornecidos." msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, if " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "Nenhum cabeçalho de autorização foi localizado, não é possível continuar com " "chamadas relacionadas OAuth, se estiver executando sob HTTPd ou Apache, se " "WSGIPassAuthorization for configurado para Ligado." 
msgid "No authenticated user" msgstr "Nenhum usuário autenticado" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "Nenhuma chave de criptografia foi localizada; execute keystone-manage " "fernet_setup para autoinicialização um." msgid "No options specified" msgstr "Nenhuma opção especificada" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "Nenhuma política associada ao terminal %(endpoint_id)s." #, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "Nenhum uso restante para confiança: %(trust_id)s" msgid "No token in the request" msgstr "Não existe token na solicitação." msgid "Non-default domain is not supported" msgstr "O domínio não padrão não é suportado" msgid "One of the trust agents is disabled or deleted" msgstr "Um dos agentes de confiança está desativado ou excluído" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "A opção %(option)s localizada sem grupo especificado durante a verificação " "de domínio solicitação de configuração" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "A opção %(option)s no grupo %(group)s não é suportada para configurações " "específicas de domínio" #, python-format msgid "Project (%s)" msgstr "Projeto (%s)" #, python-format msgid "Project ID not found: %(t_id)s" msgstr "ID de projeto não encontrado: %(t_id)s" msgid "Project field is required and cannot be empty." msgstr "Campo projeto é requerido e não pode ser vazio." #, python-format msgid "Project is disabled: %s" msgstr "O projeto está desativado: %s" msgid "Project name cannot contain reserved characters." msgstr "O nome do projeto não pode conter caracteres reservados." 
msgid "Query string is not UTF-8 encoded" msgstr "A query_string não está codificada em UTF-8 " #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "Não é suportado ler o padrão para a opção %(option)s no grupo %(group)s" msgid "Redelegation allowed for delegated by trust only" msgstr "Nova delegação permitida para delegado pela confiança somente" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "Profundidade da redelegação restante do %(redelegation_depth)d fora do " "intervalo permitido de [0..%(max_count)d]" msgid "" "Remove admin_crud_extension from the paste pipeline, the admin_crud " "extension is now always available. Updatethe [pipeline:admin_api] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "Remova a admin_crud_extension do pipeline paste, já que a extensão " "admin_crud agora está sempre disponível. Atualize a seção [pipeline:" "admin_api] no keystone-paste.ini de acordo, já que ela será removida da " "liberação O." msgid "" "Remove endpoint_filter_extension from the paste pipeline, the endpoint " "filter extension is now always available. Update the [pipeline:api_v3] " "section in keystone-paste.ini accordingly as it will be removed in the O " "release." msgstr "" "Remova a endpoint_filter_extension do pipeline paste, já que a extensão de " "filtro de terminal agora está sempre está disponível. Atualize a seção " "[pipeline:api_v3] no keystone-paste.ini de acordo, já que ela será removida " "da liberação O." msgid "" "Remove federation_extension from the paste pipeline, the federation " "extension is now always available. Update the [pipeline:api_v3] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." 
msgstr "" "Remova a federation_extension do pipeline paste, já que a extensão de " "federação agora está sempre está disponível. Atualize a seção [pipeline:" "api_v3] no keystone-paste.ini de acordo, já que ela será removida da " "liberação O." msgid "" "Remove oauth1_extension from the paste pipeline, the oauth1 extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Remova oauth1_extension do pipeline paste, já que a extensão oauth1 agora " "está sempre está disponível. Atualize a seção [pipeline:api_v3] no keystone-" "paste.ini de acordo, já que ela será removida da liberação O." msgid "" "Remove revoke_extension from the paste pipeline, the revoke extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Remova revoke_extension do pipeline paste, já que a extensão de revogação " "agora está sempre está disponível. Atualize a seção [pipeline:api_v3] no " "keystone-paste.ini de acordo, já que ela será removida da liberação O." msgid "" "Remove simple_cert from the paste pipeline, the PKI and PKIz token providers " "are now deprecated and simple_cert was only used insupport of these token " "providers. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Remova simple_cert do pipeline paste, já que os provedores PKI e PKIz estão " "agora descontinuados e simple_cert era usado somente em suporte a esses " "provedores de token. Atualize a seção [pipeline:api_v3] no keystone-paste." "ini de acordo, já que ela será removida da liberação O." msgid "" "Remove user_crud_extension from the paste pipeline, the user_crud extension " "is now always available. Updatethe [pipeline:public_api] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." 
msgstr "" "Remova a user_crud_extension do pipeline paste, já que a extensão user_crud " "agora está sempre disponível. Atualize a seção [pipeline:public_api] no " "keystone-paste.ini de acordo, já que ela será removida da liberação O." msgid "Request Token does not have an authorizing user id" msgstr "Token de Requisição não possui um ID de usuário autorizado" #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. The " "server could not comply with the request because the attribute size is " "invalid (too large). The client is assumed to be in error." msgstr "" "Atributo de requisição %(attribute)s deve ser menor ou igual a %(size)i. O " "servidor não pôde atender a requisição porque o tamanho do atributo é " "inválido (muito grande). Assume-se que o cliente está em erro." msgid "Request must have an origin query parameter" msgstr "A solicitação deve ter um parâmetro de consulta de origem" msgid "Request token is expired" msgstr "Token de requisição expirou" msgid "Request token not found" msgstr "Token de requisição não encontrado" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "Prazo de expiração solicitado é maior do que a confiança delegada novamente " "pode fornecer" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "Profundidade da nova delegação solicitada de %(requested_count)d é maior que " "a %(max_count)d permitida" msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of running " "in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will " "be removed in the \"M\"-Release." msgstr "" "Executar o keystone via eventlet foi descontinuado como Kilo em favor de " "executar em um servidor WSGI (por exemplo, mod_wsgi). Suporte para o " "keystone sob eventlet será removida no \"M\"-Release." 
msgid "Scoping to both domain and project is not allowed" msgstr "A definição de escopo para o domínio e o projeto não é permitida" msgid "Scoping to both domain and trust is not allowed" msgstr "A definição de escopo para o domínio e a trust não é permitida" msgid "Scoping to both project and trust is not allowed" msgstr "A definição de escopo para o projeto e a trust não é permitida" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "O Provedor de Serviços %(sp)s está desativado" msgid "Some of requested roles are not in redelegated trust" msgstr "Algumas funções de confiança não estão na confiança da nova delegação" msgid "Specify a domain or project, not both" msgstr "Especifique um domínio ou projeto, não ambos" msgid "Specify a user or group, not both" msgstr "Especifique um usuário ou grupo, não ambos" msgid "Specify one of domain or project" msgstr "Especifique um domínio ou projeto" msgid "Specify one of user or group" msgstr "Especifique um usuário ou grupo" #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "Comprimento de string excedido. O comprimento de string '%(string)s' excedeu " "o limite da coluna %(type)s(CHAR(%(length)d))." msgid "Tenant name cannot contain reserved characters." msgstr "O nome do locatário não pode conter caracteres reservados." #, python-format msgid "" "The %s extension has been moved into keystone core and as such its " "migrations are maintained by the main keystone database control. Use the " "command: keystone-manage db_sync" msgstr "" "A extensão %s foi movida para o núcleo do keystone e, com isso, suas " "migrações são mantidas pelo controle de banco de dados keystone principal. " "Use o comando: keystone-manage db_sync" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. 
The client " "is assumed to be in error." msgstr "" "O 'expires_at' não deve ser anterior a agora. O servidor não pôde obedecer à " "solicitação porque ela está malformada ou de alguma maneira incorreta. O " "cliente é assumido como tendo erro." msgid "The --all option cannot be used with the --domain-name option" msgstr "A opção --all não pode ser usada com a opção --domain-name" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "" "O arquivo de configuração do Keystone %(config_file)s não pôde ser " "localizado." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "A configuração específica de domínio Keystone especificou mais de um driver " "SQL (somente um é permitido): %(source)s." msgid "The action you have requested has not been implemented." msgstr "A ação que você solicitou não foi implementada." msgid "The authenticated user should match the trustor." msgstr "O usuário autenticado deve corresponder à confiança." msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." msgstr "" "Os certificados que você solicitou não estão disponíveis. É provável que " "esse servidor não utiliza tokens PKI, caso contrário, este é o resultado de " "configuração incorreta." msgid "The configured token provider does not support bind authentication." msgstr "O provedor de token configurado não suporta autenticação de ligação." msgid "The creation of projects acting as domains is not allowed in v2." msgstr "A criação de projetos agindo como domínios não é permitida na v2." #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "O comprimento da senha deve ser menor ou igual a %(size)i. 
O servidor não " "pôde obedecer à solicitação porque a senha é inválida." msgid "The request you have made requires authentication." msgstr "A requisição que você fez requer autenticação." msgid "The resource could not be found." msgstr "O recurso não pôde ser localizado." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "A chamada de revogação não deve ter ambos domain_id e project_id. Esse é um " "erro no servidor do Keystone. A solicitação atual foi interrompida." msgid "The service you have requested is no longer available on this server." msgstr "O serviço que você solicitou não está mais disponível neste servidor." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "A região pai especificada %(parent_region_id)s criaria uma hierarquia de " "região circular." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "O valor do grupo %(group)s especificado na configuração deverá ser um " "dicionário de opções" msgid "There should not be any non-oauth parameters" msgstr "Não deve haver nenhum parâmetro não oauth" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Esta não é uma versão de carga útil do Fernet reconhecida: %s" #, python-format msgid "This is not a recognized Fernet token %s" msgstr "Este não é um token Fernet %s reconhecido" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "A data não está no formato especificado. O servidor não pôde realizar a " "requisição pois ela está mal formada ou incorreta. Assume-se que o cliente " "está com erro." 
#, python-format msgid "" "To get a more detailed information on this error, re-run this command for " "the specific domain, i.e.: keystone-manage domain_config_upload --domain-" "name %s" msgstr "" "Para obter uma obter informações mais detalhadas sobre este erro, execute " "novamente este comando para o domínio específico, ou seja: keystone-manage " "domain_config_upload --domain-name %s" msgid "Token belongs to another user" msgstr "O token pertence à outro usuário" msgid "Token does not belong to specified tenant." msgstr "O token não pertence ao tenant especificado." msgid "Token version is unrecognizable or unsupported." msgstr "A versão de Token é irreconhecida ou não suportada" msgid "Trustee has no delegated roles." msgstr "Fiador não possui roles delegados." msgid "Trustor is disabled." msgstr "O fiador está desativado." #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Tentando atualizar o grupo %(group)s de modo que, e apenas que, o grupo deve " "ser especificado na configuração" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Tentando atualizar a opção %(option)s no grupo %(group)s, mas a configuração " "fornecida contém %(option_other)s ao invés" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Tentando atualizar a opção %(option)s no grupo %(group)s, de modo que, e " "apenas que, a opção deve ser especificada na configuração" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "Não é possível acessar o banco de dados keystone, verifique se ele está " "configurado corretamente." #, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." 
msgstr "" "Não é possível consumir a confiança %(trust_id)s, não é possível adquirir o " "bloqueio." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Não foi possível excluir a região %(region_id)s, uma vez que ela ou suas " "regiões filhas possuem terminais associados." msgid "Unable to downgrade schema" msgstr "Não é possível fazer downgrade do esquema" #, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "" "Não é possível localizar os grupos válidos ao utilizar o mapeamento " "%(mapping_id)s" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "Não é possível localizar diretório de configuração de domínio: %s" #, python-format msgid "Unable to lookup user %s" msgstr "Não é possível consultar o usuário %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Não é possível reconciliar o atributo de identidade %(attribute)s, pois ele " "possui valores conflitantes %(new)s e %(old)s" #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed, or this is the result of misconfiguration. Reason " "%(reason)s" msgstr "" "Não é possível assinar asserção SAML. Provavelmente esse servidor não possui " "o xmlsec1 instalado, ou isso é o resultado de uma configuração incorreta. " "Motivo %(reason)s" msgid "Unable to sign token." msgstr "Não é possível assinar o token." 
#, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Tipo de designação inesperada encontrada, %s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" "Combinação inesperada de atributos de concessão – Usuário: %(user_id)s, " "Grupo: %(group_id)s, Projeto: %(project_id)s, Domínio: %(domain_id)s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Status inesperado solicitado para resposta JSON Home, %s" msgid "Unknown Target" msgstr "Alvo Desconhecido" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "Domínio desconhecido '%(name)s' especificado pelo --domain-name" #, python-format msgid "Unknown token version %s" msgstr "Versão de token desconhecida %s" #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "Dependência não registrada: %(name)s para %(targets)s" msgid "Update of `domain_id` is not allowed." msgstr "Atualização de `domain_id` não é permitida." msgid "Update of `is_domain` is not allowed." msgstr "Atualização de `is_domain` não é permitida." msgid "Update of `parent_id` is not allowed." msgstr "Atualização de ‘parent_id’ não é permitida." msgid "Update of domain_id is only allowed for root projects." msgstr "A atualização de domain_id é permitida somente para projetos raízes." msgid "Update of domain_id of projects acting as domains is not allowed." msgstr "" "Não é permitido atualizar domain_id de projetos que agem como domínios." 
msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "" "Use um token com escopo definido do projeto ao tentar criar uma asserção SAML" msgid "" "Use of the identity driver config to automatically configure the same " "assignment driver has been deprecated, in the \"O\" release, the assignment " "driver will need to be expicitly configured if different than the default " "(SQL)." msgstr "" "O uso da configuração do driver de identidade para configurar " "automaticamente o mesmo driver de designação foi descontinuado. Na liberação " "\"O\", o driver de designação precisará ser configurado explicitamente caso " "seja diferente do padrão (SQL)." #, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "Usuário %(u_id)s não está autorizado para o tenant %(t_id)s" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "O usuário %(user_id)s não tem acesso ao domínio %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "O usuário %(user_id)s não tem acesso ao projeto %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "Usuário %(user_id)s já é membro do grupo %(group_id)s" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "Usuário '%(user_id)s' não localizado no grupo '%(group_id)s'" msgid "User IDs do not match" msgstr "ID de usuário não confere" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "A autenticação do usuário não pode ser construída porque está faltando o ID " "ou o nome do usuário com o ID do domínio ou o nome do usuário com o nome do " "domínio." 
#, python-format msgid "User is disabled: %s" msgstr "O usuário está desativado: %s" msgid "User is not a member of the requested project" msgstr "Usuário não é um membro do projeto requisitado" msgid "User is not a trustee." msgstr "Usuário não é confiável." msgid "User not found" msgstr "Usuário não localizado" msgid "User not valid for tenant." msgstr "Usuário não é válido para o tenant." msgid "User roles not supported: tenant_id required" msgstr "Papéis de usuários não suportado: necessário tenant_id" #, python-format msgid "User type %s not supported" msgstr "Tipo de usuário %s não suportado" msgid "You are not authorized to perform the requested action." msgstr "Você não está autorizado à realizar a ação solicitada." #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "Você não está autorizado a executar a ação solicitada: %(action)s" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Você tentou criar um recurso usando o token de administração. Como esse " "token não está dentro de um domínio, deve-se incluir explicitamente um " "domínio ao qual esse recurso possa pertencer." msgid "`key_mangler` functions must be callable." msgstr "Funções `key_mangler` devem ser chamáveis." 
msgid "`key_mangler` option must be a function reference" msgstr "opção `key_mangler` deve ser uma referência de função" msgid "any options" msgstr "quaisquer opções" msgid "auth_type is not Negotiate" msgstr "auth_type não é Negotiate" msgid "authorizing user does not have role required" msgstr "Usuário autorizado não possui o role necessário" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "não é possível criar um projeto em uma ramificação que contém um projeto " "desativado: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "Não é possível excluir um projeto ativado que age como um domínio. Desative " "o projeto %s primeiro." #, python-format msgid "group %(group)s" msgstr "grupo %(group)s" msgid "" "idp_contact_type must be one of: [technical, other, support, administrative " "or billing." msgstr "" "idp_contact_type deve ser uma dessas opções: [técnico, outro, suporte, " "administrativo ou faturamento." #, python-format msgid "invalid date format %s" msgstr "formato de data inválido %s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "Não é permitido ter dois projetos agindo como domínios com o mesmo nome: %s" #, python-format msgid "" "it is not permitted to have two projects within a domain with the same " "name : %s" msgstr "" "Não é permitido ter dois projetos dentro de um domínio com o mesmo nome: %s" msgid "only root projects are allowed to act as domains." msgstr "Somente projetos raízes são permitidos para agirem como domínios. 
" #, python-format msgid "option %(option)s in group %(group)s" msgstr "opção %(option)s no grupo %(group)s" msgid "provided consumer key does not match stored consumer key" msgstr "" "Chave de consumidor fornecida não confere com a chave de consumidor " "armazenada" msgid "provided request key does not match stored request key" msgstr "" "Chave de requisição do provedor não confere com a chave de requisição " "armazenada" msgid "provided verifier does not match stored verifier" msgstr "Verificador fornecido não confere com o verificador armazenado" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses deve ser um número inteiro positivo ou nulo." msgid "remaining_uses must not be set if redelegation is allowed" msgstr "" "remaining_uses não deverá ser definido se a nova delegação for permitida" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "solicite atualizar o grupo %(group)s, mas a configuração fornecida contém o " "grupo %(group_other)s ao invés" msgid "rescope a scoped token" msgstr "Defina novamente um escopo de um token com escopo" #, python-format msgid "role %s is not defined" msgstr "papel %s não foi definido" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "scope.project.id deverá ser especificado se include_subtree também for " "especificado" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s não encontrado ou não é um diretório" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s não encontrada ou não é um arquivo" #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "referência de token deve ser um tipo KeystoneToken, obteve: %s" msgid "" "update of domain_id is deprecated as of Mitaka and will be removed in O." 
msgstr "" "A atualização de domain_id foi descontinuada a partir do Mitaka e será " "removida na liberação O." #, python-format msgid "" "validated expected to find %(param_name)r in function signature for " "%(func_name)r." msgstr "" "validado esperava localizar %(param_name)r na assinatura da função para " "%(func_name)r." keystone-9.0.0/keystone/locale/it/0000775000567000056710000000000012701407246020241 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/it/LC_MESSAGES/0000775000567000056710000000000012701407246022026 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000154612701407102026573 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Italian\n" #, python-format msgid "Unable to open template file %s" msgstr "Impossibile aprire il file di template %s" keystone-9.0.0/keystone/locale/it/LC_MESSAGES/keystone.po0000664000567000056710000015672312701407105024237 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Alessandra , 2016. #zanata # Tom Cocozzello , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev6\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-22 15:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-22 02:50+0000\n" "Last-Translator: Alessandra \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Italian\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "%(driver)s is not supported driver version" msgstr "%(driver)s non è una versione driver supportata" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "Il nome %(entity)s non può contenere caratteri riservati: %(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s non è un evento di notifica valido, deve essere uno tra: " "%(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s non è un host di dashboard attendibile" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s non fornisce le migrazioni del database. Il percorso del " "repository di migrazione in %(path)s non esiste o non è una directory." #, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s" msgstr "%(prior_role_id)s non implica %(implied_role_id)s" #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." msgstr "%(property_name)s non può essere inferiore a %(min_length)s caratteri." 
#, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "%(property_name)s non è un %(display_expected_type)s" #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." msgstr "%(property_name)s non può essere superiore a %(max_length)s caratteri." #, python-format msgid "%(role_id)s cannot be an implied roles" msgstr "%(role_id)s non può essere un ruolo implicato" #, python-format msgid "%s cannot be empty." msgstr "%s non può essere vuoto." #, python-format msgid "%s extension does not exist." msgstr "L'estensione %s non esiste." #, python-format msgid "%s field is required and cannot be empty" msgstr "Il campo %s è obbligatorio e non può essere vuoto" #, python-format msgid "%s field(s) cannot be empty" msgstr "i campi %s non possono essere vuoti" #, python-format msgid "" "%s for the LDAP identity backend has been deprecated in the Mitaka release " "in favor of read-only identity LDAP access. It will be removed in the \"O\" " "release." msgstr "" "%s per il backend di 'identità LDAP è obsoleto nella release Mitaka rispetto " "all'accesso LDAP di sola lettura. Verrà rimosso nella release \"O\"." msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "" "(Disabilitare la modalità insecure_debug per eliminare questi dettagli)." msgid "--all option cannot be mixed with other options" msgstr "--l'opzione all non può essere combinata con altre opzioni" msgid "A project-scoped token is required to produce a service catalog." msgstr "" "È necessario un token in ambito progetto per produrre un catalogo del " "servizio." msgid "Access token is expired" msgstr "Il token di accesso è scaduto" msgid "Access token not found" msgstr "Token di accesso non trovato" msgid "Additional authentications steps required." msgstr "Sono richiesti ulteriori passi per le autenticazioni." 
msgid "An unexpected error occurred when retrieving domain configs" msgstr "" "Si è verificato un errore non previsto durante il richiamo delle " "configurazioni del dominio" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "Si è verificato un errore quando si tenta di archiviare %s" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "" "Si è verificato un errore non previsto che ha impedito al server di " "soddisfare la richiesta." #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "" "Si è verificato un errore imprevisto che impedisce al server di soddisfare " "la richiesta: %(exception)s" msgid "An unhandled exception has occurred: Could not find metadata." msgstr "" "Si è verificata un'eccezione non gestita: impossibile trovare i metadati." msgid "At least one option must be provided" msgstr "È necessario fornire almeno un'opzione" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "È necessario fornire almeno un'opzione, utilizzare --all o --domain-name" msgid "At least one role should be specified." msgstr "Specificare almeno un ruolo." #, python-format msgid "" "Attempted automatic driver selection for assignment based upon " "[identity]\\driver option failed since driver %s is not found. Set " "[assignment]/driver to a valid driver in keystone config." msgstr "" "Tentata selezione automatica del driver per l'assegnazione basata su " "[identity]. Opzione \\driver non riuscita in quanto il driver %s non è stato " "trovato. Impostare [assignment]/driver su un driver valido nella " "configurazione keystone." msgid "Attempted to authenticate with an unsupported method." msgstr "Tentativo di autenticazione con un metodo non supportato." 
msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "" "Tentativo di utilizzare il token OS-FEDERATION con il servizio identità V2, " "utilizzare l'autenticazione V3" msgid "Authentication plugin error." msgstr "errore di autenticazione plugin." #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "" "Il backend `%(backend)s` non è un backend memcached valido. Backend validi: " "%(backend_list)s" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "" "Impossibile autorizzare un token di richiesta con un token emesso mediante " "delega." #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "Impossibile modificare %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "Impossibile modificare l'ID dominio" msgid "Cannot change user ID" msgstr "Impossibile modificare l'ID utente" msgid "Cannot change user name" msgstr "Impossibile modificare il nome utente" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "Impossibile creare un endpoint con un URL non valido: %(url)s" #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "Impossibile creare il progetto con l'elemento parent: %(project_id)s" #, python-format msgid "" "Cannot create project, since it specifies its owner as domain %(domain_id)s, " "but specifies a parent in a different domain (%(parent_domain_id)s)." msgstr "" "Impossibile creare un progetto in quanto specifica il relativo proprietario " "come un dominio (%(domain_id)s) ma specifica un elemento parent in un altro " "dominio (%(parent_domain_id)s)." #, python-format msgid "" "Cannot create project, since its parent (%(domain_id)s) is acting as a " "domain, but project's specified parent_id (%(parent_id)s) does not match " "this domain_id." 
msgstr "" "Impossibile creare un progetto in quanto il relativo parent (%(domain_id)s) " "agisce come un dominio, ma l'id_parent (%(parent_id)s) specificato del " "progetto non corrisponde all'id_dominio." msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "Impossibile eliminare un dominio abilitato; è necessario prima disabilitarlo." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Impossibile eliminare il progetto %(project_id)s perché la relativa " "struttura ad albero secondaria contiene progetti abilitati." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "Impossibile eliminare il progetto %s perché non è una foglia nella " "gerarchia. Se si desidera eliminare un'intera struttura ad albero secondaria " "utilizza l'opzione a catena." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Impossibile disabilitare il progetto %(project_id)s perché la relativa " "struttura ad albero secondaria contiene progetti abilitati." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "" "Impossibile abilitare il progetto %s perché dispone di elementi parent " "disabilitati" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "Impossibile elencare le assegnazione originate da gruppi e filtrate da ID " "utente." msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "Impossibile elencare i token della richiesta con un token emesso mediante " "delega." #, python-format msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s" msgstr "Impossibile aprire il certificato %(cert_file)s. 
Motivo: %(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Impossibile rimuovere un ruolo che non è stato concesso, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "Impossibile troncare una chiamata al driver senza hints list come primo " "parametro dopo self " msgid "Cannot update domain_id of a project that has children." msgstr "Impossibile aggiornare domain_id di un progetto con elementi child." msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "Impossibile utilizzare i parametri della query parents_as_list e " "parents_as_ids contemporaneamente." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "Impossibile utilizzare i parametri della query subtree_as_list e " "subtree_as_ids contemporaneamente." msgid "Cascade update is only allowed for enabled attribute." msgstr "L'aggiornamento a catena è consentito solo per un attributo abilitato." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "La combinazione del filtro operativo e di gruppo avrà sempre come risultato " "un elenco vuoto." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "La combinazione di filtri operativi, di dominio ed ereditati avrà sempre " "come risultato un elenco vuoto." 
#, python-format msgid "Config API entity at /domains/%s/config" msgstr "Entità API config in /domains/%s/config" #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "" "Si è verificato un conflitto nel tentativo di archiviare %(type)s - " "%(details)s" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "Sono stati specificati ID regione in conflitto: \"%(url_id)s\" != " "\"%(ref_id)s\"" msgid "Consumer not found" msgstr "Consumer non trovato" #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target %(target)s" msgstr "" "Impossibile modificare gli attributi non modificabili '%(attributes)s' nella " "destinazione %(target)s" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "Impossibile determinare l'ID del provider di identità. L'opzione di " "configurazione %(issuer_attribute)s non è stata trovata nell'ambiente di " "richiesta. 
" #, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "" "Impossibile trovare %(group_or_option)s nella configurazione del dominio per " "il dominio %(domain_id)s" #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "Impossibile trovare il gruppo di endpoint: %(endpoint_group_id)s" msgid "Could not find Identity Provider identifier in environment" msgstr "" "Impossibile trovare l'identificativo del provider identità nell'ambiente" #, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "Impossibile trovare il provider identità: %(idp_id)s" #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "Impossibile trovare il provider del servizio: %(sp_id)s" #, python-format msgid "Could not find credential: %(credential_id)s" msgstr "Impossibile trovare la credenziale: %(credential_id)s" #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "Impossibile trovare il dominio: %(domain_id)s" #, python-format msgid "Could not find endpoint: %(endpoint_id)s" msgstr "Impossibile trovare l'endpoint: %(endpoint_id)s" #, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "" "Impossibile trovare il protocollo federato %(protocol_id)s per il provider " "identità: %(idp_id)s" #, python-format msgid "Could not find group: %(group_id)s" msgstr "Impossibile trovare il gruppo: %(group_id)s" #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "Impossibile trovare l'associazione: %(mapping_id)s" msgid "Could not find policy association" msgstr "Impossibile trovare l'associazione della politica" #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "Impossibile trovare la politica: %(policy_id)s" #, python-format msgid "Could not find project: %(project_id)s" msgstr "Impossibile trovare il progetto: %(project_id)s" #, python-format 
msgid "Could not find region: %(region_id)s" msgstr "Impossibile trovare la regione: %(region_id)s" #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" "Impossibile trovare l'assegnazione ruolo con il ruolo: %(role_id)s, utente o " "gruppo: %(actor_id)s, progetto o dominio: %(target_id)s" #, python-format msgid "Could not find role: %(role_id)s" msgstr "Impossibile trovare il ruolo: %(role_id)s" #, python-format msgid "Could not find service: %(service_id)s" msgstr "Impossibile trovare il servizio: %(service_id)s" #, python-format msgid "Could not find token: %(token_id)s" msgstr "Impossibile trovare il token: %(token_id)s" #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "Impossibile trovare il trust: %(trust_id)s" #, python-format msgid "Could not find user: %(user_id)s" msgstr "Impossibile trovare l'utente: %(user_id)s" #, python-format msgid "Could not find version: %(version)s" msgstr "Impossibile trovare la versione: %(version)s" #, python-format msgid "Could not find: %(target)s" msgstr "Impossibile trovare: %(target)s" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "Impossibile associare le proprietà dell'utente federato per identificare i " "valori. Controllare i log di debug o l'associazione utilizzata per ulteriori " "dettagli." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "Impossibile associare l'utente durante l'impostazione dell'identità utente " "temporanea. Le regole di associazione devono specificare nome/id utente o la " "variabile di ambiente REMOTE_USER deve essereimpostata." 
msgid "Could not validate the access token" msgstr "Impossibile convalidare il token di accesso" msgid "Credential belongs to another user" msgstr "La credenziale appartiene ad un altro utente" msgid "Credential signature mismatch" msgstr "Mancata corrispondenza della firma delle credenziali" #, python-format msgid "" "Direct import of auth plugin %(name)r is deprecated as of Liberty in favor " "of its entrypoint from %(namespace)r and may be removed in N." msgstr "" "L'importazione diretta di auth plugin %(name)r è obsoleta a partire da " "Liberty rispetto al relativo entrypoint da %(namespace)r e potrebbe essere " "rimossa in N." #, python-format msgid "" "Direct import of driver %(name)r is deprecated as of Liberty in favor of its " "entrypoint from %(namespace)r and may be removed in N." msgstr "" "L'importazione diretta del driver %(name)r è obsoleta a partire da Liberty " "rispetto al relativo entrypoint da %(namespace)r e potrebbe essere rimossa " "in N." msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "Disabilitazione di un'entità in cui l'attributo 'enable' è ignorato dalla " "configurazione." #, python-format msgid "Domain (%s)" msgstr "Dominio (%s)" #, python-format msgid "Domain cannot be named %s" msgstr "Il dominio non può essere denominato %s" #, python-format msgid "Domain cannot have ID %s" msgstr "Il dominio non può avere l'ID %s" #, python-format msgid "Domain is disabled: %s" msgstr "Il dominio è disabilitato: %s" msgid "Domain name cannot contain reserved characters." msgstr "Il nome dominio non può contenere caratteri riservati." msgid "Domain scoped token is not supported" msgstr "L'ambito del dominio token non è supportato" msgid "Domain specific roles are not supported in the V8 role driver" msgstr "Ruoli specifici di dominio non sono supportati nel driver ruolo V8" #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." 
msgstr "" "Il dominio: %(domain)s dispone già di una configurazione definita - si sta " "ignorando il file: %(file)s." msgid "Duplicate Entry" msgstr "Voce duplicata" #, python-format msgid "Duplicate ID, %s." msgstr "ID duplicato, %s." #, python-format msgid "Duplicate entry: %s" msgstr "Voce duplicata: %s" #, python-format msgid "Duplicate name, %s." msgstr "Nome duplicato, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "ID remoto duplicato: %s" msgid "EC2 access key not found." msgstr "Chiave di accesso EC2 non trovata." msgid "EC2 signature not supplied." msgstr "Firma EC2 non fornita." msgid "" "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set." msgstr "" "È necessario impostare l'argomento --bootstrap-password o la variabile " "OS_BOOTSTRAP_PASSWORD." msgid "Enabled field must be a boolean" msgstr "Il campo Abilitato deve essere un valore booleano" msgid "Enabled field should be a boolean" msgstr "Il campo Abilitato deve essere un valore booleano" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Endpoint %(endpoint_id)s non trovato nel progetto %(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "Associazione al progetto del gruppo di endpoint non trovata" msgid "Ensure configuration option idp_entity_id is set." msgstr "" "Accertarsi che l'opzione di configurazione idp_entity_id sia impostata." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "" "Accertarsi che l'opzione di configurazione idp_sso_endpoint sia impostata." #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Errore durante l'analisi del file di configurazione per il dominio: " "%(domain)s, file: %(file)s." 
#, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Errore durante l'apertura del file %(path)s: %(err)s" #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "Errore durante l'analisi della riga: '%(line)s': %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Errore durante l'analisi delle regole %(path)s: %(err)s" #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "Errore durante le lettura del file di metadati, %(reason)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Superato il numero di tentativi per registrare il dominio %(domain)s al fine " "di utilizzare il driver SQL, l'ultimo dominio che sembra avere avuto quel " "driver è %(last_domain)s, operazione terminata" #, python-format msgid "Expected dict or list: %s" msgstr "Previsto dict o list: %s" msgid "" "Expected signing certificates are not available on the server. Please check " "Keystone configuration." msgstr "" "I certificati di firma previsti non sono disponibili sul server. Controllare " "la configurazione Keystone." #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "previsto di trovare %(attribute)s in %(target)s - il server non è in grado " "di soddisfare la richiesta perché non è valido o non è corretto. Si ritiene " "che il client sia in errore." 
#, python-format msgid "Failed to start the %(name)s server" msgstr "Impossibile avviare il server %(name)s" msgid "Failed to validate token" msgstr "Impossibile convalidare il token" msgid "Federation token is expired" msgstr "Il token comune è scaduto" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "Il campo \"remaining_uses\" è impostato su %(value)s mentre non deve essere " "impostato per assegnare una nuova delega ad un trust" msgid "Found invalid token: scoped to both project and domain." msgstr "trovato token non valido: in ambito sia di progetto che di dominio." #, python-format msgid "Group %s not found in config" msgstr "Gruppo %s non trovato in config" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "Il gruppo %(group)s non è supportato per le configurazioni specifiche del " "dominio" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "Il gruppo %(group_id)s restituito dall'associazione %(mapping_id)s non è " "stato trovato nel backend." #, python-format msgid "" "Group membership across backend boundaries is not allowed, group in question " "is %(group_id)s, user is %(user_id)s" msgstr "" "L'appartenenza al gruppo tra i limiti di backend non è consentita, il gruppo " "in questione è %(group_id)s, l'utente è %(user_id)s" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "Attributo ID %(id_attr)s non trovato nell'oggetto LDAP %(dn)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "Il provider identità %(idp)s è disabilitato" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" "L'identificativo del provider identità in entrata non è incluso tra gli " "identificativi accettati." msgid "Invalid EC2 signature." 
msgstr "Firma EC2 non valida." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "Opzione certificazioni (certs) LDAP TLS non valida: %(option)s. Scegliere " "una delle seguenti: %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Opzione LDAP TLS_AVAIL non valida: %s. TLS non disponibile" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "Opzione deref LDAP non valida: %(option)s. Scegliere una tra: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "" "Ambito LDAP non valido: %(scope)s. Scegliere uno dei seguenti: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Combinazione TLS / LDAPS non valida" #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "" "Tipo di dati delle informazioni di verifica non valido: %(data)s (%(type)s)" msgid "Invalid blob in credential" msgstr "Blob non valido nella credenziale" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Nome dominio non valido: %(domain)s trovato nel nome file di configurazione: " "%(file)s - si sta ignorando questo file." #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "Configurazione specifica del dominio non valida: %(reason)s" #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "Input non valido per il campo '%(path)s'. Il valore è '%(value)s'." msgid "Invalid limit value" msgstr "Valore del limite non valido" #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service or " "Region+Service allowed. 
Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s" msgstr "" "combinazione di entità non valida per l'associazione della politica - È " "consentito solo endpoint, servizio o regione+servizio. La richiesta era - " "Endpoint: %(endpoint_id)s, Servizio: %(service_id)s, Regione: %(region_id)s" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Regola non valida: %(identity_value)s. Entrambi le parole chiave 'groups' e " "'domain' devono essere specificate." msgid "Invalid signature" msgstr "Firma non valida" msgid "Invalid user / password" msgstr "Utente/password non validi" msgid "Invalid username or TOTP passcode" msgstr "username o passcode TOTP non validi" msgid "Invalid username or password" msgstr "username o password non validi" #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." msgstr "La regione KVS %s è già configurata. Impossibile riconfigurare." #, python-format msgid "Key Value Store not configured: %s" msgstr "KVS (Key Value Store) non configurato: %s" #, python-format msgid "LDAP %s create" msgstr "LDAP %s crea" #, python-format msgid "LDAP %s delete" msgstr "LDAP %s elimina" #, python-format msgid "LDAP %s update" msgstr "LDAP %s aggiorna" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "La lunghezza dell'id risorsa trasformabile è > 64, che rappresenta il numero " "massimo di caratteri consentiti" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "La sezione locale nell'associazione %(mapping_id)s si riferisce ad una " "corrispondenza remota che non esiste (ad esempio {0} in una sezione locale)." 
#, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "Si è verificato un timeout di blocco per la chiave, %(target)s" #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "" "La chiave di blocco deve corrispondere alla chiave di destinazione: " "%(lock)s != %(target)s" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "Url dell'endpoint non corretto (%(endpoint)s), consultare il log ERROR per " "ulteriori dettagli." msgid "Marker could not be found" msgstr "Impossibile trovare l'indicatore" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "Profondità massima della gerarchia raggiunta per il ramo %s." #, python-format msgid "Maximum lock attempts on %s occurred." msgstr "È stato raggiunto il numero massimo di tentativi di blocco su %s." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "Il membro %(member)s è già un membro del gruppo %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Metodo non richiamabile: %s" msgid "Missing entity ID from environment" msgstr "ID entità mancante dall'ambiente" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "La modifica di \"redelegation_count\" dopo la riassegnazione della delega " "non è consentita. Si consiglia di omettere questo parametro." msgid "Multiple domains are not supported" msgstr "Non sono supportati più domini" msgid "Must be called within an active lock context." msgstr "Deve essere richiamato all'interno di un contesto di blocco attivo." msgid "Must specify either domain or project" msgstr "È necessario specificare il dominio o il progetto" msgid "Name field is required and cannot be empty" msgstr "Il campo relativo al nome è obbligatorio e non può essere vuoto" msgid "Neither Project Domain ID nor Project Domain Name was provided." 
msgstr "" "Non è stato fornito l'ID dominio progetto né il nome dominio progetto. " msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, if " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "Nessuna intestazione di autorizzazione trovata, impossibile procedere con le " "chiamate correlate a OAuth, se l'esecuzione avviene in ambito HTTPd o " "Apache, assicurarsi che WSGIPassAuthorization sia impostato su Attivo." msgid "No authenticated user" msgstr "Nessun utente autenticato" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "Nessuna chiave di codifica trovata; eseguire keystone-manage fernet_setup " "per eseguire un avvio." msgid "No options specified" msgstr "Nessuna opzione specificata" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "Nessuna politica associata all'endpoint %(endpoint_id)s." #, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "Nessun utilizzo residuo per trust: %(trust_id)s" msgid "No token in the request" msgstr "Nessun token nella richiesta" msgid "Non-default domain is not supported" msgstr "Il dominio non predefinito non è supportato" msgid "One of the trust agents is disabled or deleted" msgstr "Uno degli agent trust è disabilitato o eliminato" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "L'opzione %(option)s è stato trovato senza alcun gruppo specificato durante " "il controllo della richiesta di configurazione del dominio" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "L'opzione %(option)s nel gruppo %(group)s non è supportata per le " "configurazioni specifiche del dominio" #, python-format msgid "Project (%s)" msgstr "Progetto (%s)" #, python-format msgid "Project ID not found: %(t_id)s" 
msgstr "ID progetto non trovato: %(t_id)s " msgid "Project field is required and cannot be empty." msgstr "Il campo progetto è obbligatorio e non può essere vuoto." #, python-format msgid "Project is disabled: %s" msgstr "Il progetto è disabilitato: %s" msgid "Project name cannot contain reserved characters." msgstr "Il nome progetto non può contenere caratteri riservati." msgid "Query string is not UTF-8 encoded" msgstr "La stringa di query non è codificata in UTF-8 " #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "La lettura dell'impostazione predefinita per l'opzione %(option)s nel gruppo " "%(group)s non è supportata" msgid "Redelegation allowed for delegated by trust only" msgstr "" "Assegnazione di una nuova delega consentita solo per i delegati dal trust" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "profondità di riassegnazione della delega rimanente %(redelegation_depth)d " "non compresa nell'intervallo consentito [0..%(max_count)d]" msgid "" "Remove admin_crud_extension from the paste pipeline, the admin_crud " "extension is now always available. Updatethe [pipeline:admin_api] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "Rimuovere admin_crud_extension dalla pipeline paste, l'estensione admin_crud " "è ora sempre disponibile. Aggiornare la sezione [pipeline:admin_api] in " "keystone-paste.ini di conseguenza, in quanto verrà rimossa nella release O." msgid "" "Remove endpoint_filter_extension from the paste pipeline, the endpoint " "filter extension is now always available. Update the [pipeline:api_v3] " "section in keystone-paste.ini accordingly as it will be removed in the O " "release." msgstr "" "Rimuovere endpoint_filter_extension dalla pipeline paste, l'estensione del " "filtro di endpoint è ora sempre disponibile. 
Aggiornare la sezione [pipeline:" "api_v3] in keystone-paste.ini di conseguenza, in quanto verrà rimossa nella " "release O." msgid "" "Remove federation_extension from the paste pipeline, the federation " "extension is now always available. Update the [pipeline:api_v3] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "Rimuovere federation_extension dalla pipeline paste, l'estensione federation " "è ora sempre disponibile. Aggiornare la sezione [pipeline:api_v3] in " "keystone-paste.ini di conseguenza, in quanto verrà rimossa nella release O." msgid "" "Remove oauth1_extension from the paste pipeline, the oauth1 extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Rimuovere oauth1_extension dalla pipeline paste, l'estensione oauth1 è ora " "sempre disponibile. Aggiornare la sezione [pipeline:api_v3] in keystone-" "paste.ini di conseguenza, in quanto verrà rimossa nella release O." msgid "" "Remove revoke_extension from the paste pipeline, the revoke extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Rimuovere revoke_extension dalla pipeline paste, l'estensione revoke è ora " "sempre disponibile. Aggiornare la sezione [pipeline:api_v3] in keystone-" "paste.ini di conseguenza, in quanto verrà rimossa nella release O." msgid "" "Remove simple_cert from the paste pipeline, the PKI and PKIz token providers " "are now deprecated and simple_cert was only used insupport of these token " "providers. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Rimuovere simple_cert dalla pipeline paste, i provider di token PKI e PKIz " "sono ora obsoleti e simple_cert è stato utilizzato solo in supporto di " "questi provider di token. 
Aggiornare la sezione [pipeline:api_v3] in " "keystone-paste.ini di conseguenza, in quanto verrà rimossa nella release O." msgid "" "Remove user_crud_extension from the paste pipeline, the user_crud extension " "is now always available. Updatethe [pipeline:public_api] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." msgstr "" "Rimuovere user_crud_extension dalla pipeline paste, l'estensione user_crud è " "ora sempre disponibile. Aggiornare la sezione [pipeline:public_api] in " "keystone-paste.ini di conseguenza, in quanto verrà rimossa nella release O." msgid "Request Token does not have an authorizing user id" msgstr "" "Il token della richiesta non dispone di un id utente per l'autorizzazione" #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. The " "server could not comply with the request because the attribute size is " "invalid (too large). The client is assumed to be in error." msgstr "" "L'attributo della richiesta %(attribute)s deve essere minore o uguale a " "%(size)i. Il server non è riuscito a soddisfare la richiesta poiché la " "dimensione dell'attributo non è valida (troppo grande). Si ritiene che il " "client sia in errore." 
msgid "Request must have an origin query parameter" msgstr "La richiesta deve avere un parametro della query di origine" msgid "Request token is expired" msgstr "Il token della richiesta è scaduto" msgid "Request token not found" msgstr "token della richiesta non trovata" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "Il tempo di scadenza richiesto è maggiore di quello che può essere fornito " "dal trust con delega riassegnata" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "La profondità di riassegnazione della delega richiesta %(requested_count)d è " "maggiore del valore consentito %(max_count)d" msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of running " "in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will " "be removed in the \"M\"-Release." msgstr "" "L'esecuzione del keystone via eventlet è obsoleta in Kilo, rispetto " "all'esecuzione in un server WSGI (ad esempio mod_wsgi). Il supporto per il " "keystone in eventlet verrà rimosso in \"M\"-Release." 
msgid "Scoping to both domain and project is not allowed" msgstr "Il controllo sia del dominio che del progetto non è consentito" msgid "Scoping to both domain and trust is not allowed" msgstr "Il controllo sia del dominio che di trust non è consentito" msgid "Scoping to both project and trust is not allowed" msgstr "Il controllo sia del progetto che di trust non è consentito" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "Il Provider del servizio %(sp)s è disabilitato" msgid "Some of requested roles are not in redelegated trust" msgstr "" "Alcuni dei ruoli richiesti non sono presenti nel trust con delega riassegnata" msgid "Specify a domain or project, not both" msgstr "Specificare un dominio o un progetto, non entrambi" msgid "Specify a user or group, not both" msgstr "Specificare un utente o un gruppo, non entrambi" msgid "Specify one of domain or project" msgstr "Specificare un valore di dominio o progetto" msgid "Specify one of user or group" msgstr "Specificare un valore di utente o gruppo" #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "È stata superata la lunghezza della stringa. La lunghezza della stringa " "'%(string)s' ha superato il limite della colonna %(type)s(CHAR(%(length)d))." msgid "Tenant name cannot contain reserved characters." msgstr "Il nome tenant non può contenere caratteri riservati." #, python-format msgid "" "The %s extension has been moved into keystone core and as such its " "migrations are maintained by the main keystone database control. Use the " "command: keystone-manage db_sync" msgstr "" "L'estensione %s è stata spostata nel keystone di base e le relative " "migrazioni vengono mantenute dal controllo di database keystone principale. " "Utilizzare il comando: keystone-manage db_sync" msgid "" "The 'expires_at' must not be before now. 
The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "'expires_at' non deve essere prima ora. Il server non è riuscito a " "rispettare larichiesta perché è in formato errato o non corretta. Il client " "viene considerato in errore." msgid "The --all option cannot be used with the --domain-name option" msgstr "L'opzione --all non può essere utilizzata con l'opzione --domain-name" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "" "Impossibile trovare il file di configurazione Keystone %(config_file)s." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "La configurazione specifica del dominio keystone ha specificato più di un " "driver SQL (solo uno è consentito): %(source)s." msgid "The action you have requested has not been implemented." msgstr "L'azione richiesta non è stata implementata." msgid "The authenticated user should match the trustor." msgstr "L'utente autenticato deve corrispondere al ruolo trustor." msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." msgstr "" "I certificati richiesti non sono disponibili. È probabile che questo server " "non utilizzi i token PKI, altrimenti questo è il risultato di una " "configurazione errata." msgid "The configured token provider does not support bind authentication." msgstr "Il provider di token configurato non supporta l'autenticazione bind. " msgid "The creation of projects acting as domains is not allowed in v2." msgstr "" "La creazione di progetti che agiscono come domini non è consentita in v2. " #, python-format msgid "" "The password length must be less than or equal to %(size)i. 
The server could " "not comply with the request because the password is invalid." msgstr "" "La lunghezza della password deve essere minore o uguale a %(size)i. Il " "server non è in grado di soddisfare la richiesta perché la password non è " "valida." msgid "The request you have made requires authentication." msgstr "La richiesta che è stata fatta richiede l'autenticazione." msgid "The resource could not be found." msgstr "Impossibile trovare la risorsa." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "La chiamata di revoca non deve avere entrambi domain_id e project_id. Questo " "è un bug nel server Keystone. La richiesta corrente è stata interrotta." msgid "The service you have requested is no longer available on this server." msgstr "Il servizio richiesto non è più disponibile su questo server." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "La regione parent specificata %(parent_region_id)s crea una gerarchia di " "regione circolare." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "Il valore del gruppo %(group)s specificato nella configurazione deve essere " "un dizionario di opzioni" msgid "There should not be any non-oauth parameters" msgstr "Non deve essere presente nessun parametro non-oauth" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Questa non è una versione di payload Fernet riconosciuta: %s" #, python-format msgid "This is not a recognized Fernet token %s" msgstr "Questo non è un token Fernet %s riconosciuto " msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "Data/ora non nel formato previsto. 
Il server non è riuscito a rispettare la " "richiesta perché è in formato errato o non corretta. Il client viene " "considerato in errore." #, python-format msgid "" "To get a more detailed information on this error, re-run this command for " "the specific domain, i.e.: keystone-manage domain_config_upload --domain-" "name %s" msgstr "" "Per ottenere informazioni più dettagliate su questo errore, eseguire di " "nuovo questo comando per il dominio specificato, ad esempio: keystone-manage " "domain_config_upload --domain-name %s" msgid "Token belongs to another user" msgstr "Il token appartiene ad un altro utente" msgid "Token does not belong to specified tenant." msgstr "Il token non appartiene al tenant specificato." msgid "Token version is unrecognizable or unsupported." msgstr "La versione token non è riconoscibile o non supportata. " msgid "Trustee has no delegated roles." msgstr "Trustee non ha ruoli delegati." msgid "Trustor is disabled." msgstr "Trustor è disabilitato." #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Tentativo di aggiornare il gruppo %(group)s, pertanto, solo quel gruppo deve " "essere specificato nella configurazione" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Tentativo di aggiornare l'opzione %(option)s nel gruppo %(group)s, ma la " "configurazione fornita contiene l'opzione %(option_other)s" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Tentativo di aggiornare l'opzione %(option)s nel gruppo %(group)s, pertanto, " "solo quell'opzione deve essere specificata nella configurazione" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." 
msgstr "" "Impossibile accedere al database del keystone, controllare se è configurato " "correttamente." #, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." msgstr "" "Impossibile utilizzare trust %(trust_id)s, impossibile acquisire il blocco." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Impossibile eliminare la regione %(region_id)s perché la regione o le " "relative regioni child hanno degli endpoint associati." msgid "Unable to downgrade schema" msgstr "Impossibile eseguire il downgrade dello schema" #, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "" "Impossibile trovare i gruppi validi durante l'utilizzo dell'associazione " "%(mapping_id)s" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "Impossibile individuare la directory config del dominio: %s" #, python-format msgid "Unable to lookup user %s" msgstr "Impossibile eseguire la ricerca dell'utente %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Impossibile riconciliare l'attributo identity %(attribute)s poiché ha " "valori in conflitto tra i %(new)s e i %(old)s" #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed, or this is the result of misconfiguration. Reason " "%(reason)s" msgstr "" "Impossibile firmare l'asserzione SAML. Probabilmente questo server non " "dispone di xmlsec1 installato o è il risultato di una configurazione " "sbagliata. Motivo %(reason)s" msgid "Unable to sign token." msgstr "Impossibile firmare il token." 
#, python-format msgid "Unexpected assignment type encountered, %s" msgstr "È stato rilevato un tipo di assegnazione non previsto, %s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" "Combinazione non prevista degli attributi di autorizzazione - Utente: " "%(user_id)s, Gruppo: %(group_id)s, Progetto: %(project_id)s, Dominio: " "%(domain_id)s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Stato non previsto richiesto per la risposta JSON Home, %s" msgid "Unknown Target" msgstr "Destinazione sconosciuta" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "Dominio sconosciuto '%(name)s' specificato da --domain-name" #, python-format msgid "Unknown token version %s" msgstr "Versione di token sconosciuta %s" #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "Dipendenza non registrata: %(name)s per %(targets)s" msgid "Update of `domain_id` is not allowed." msgstr "Aggiornamento di `domain_id` non consentito." msgid "Update of `is_domain` is not allowed." msgstr "Aggiornamento di `is_domain` non consentito." msgid "Update of `parent_id` is not allowed." msgstr "Aggiornamento di `parent_id` non consentito." msgid "Update of domain_id is only allowed for root projects." msgstr "L'aggiornamento di domain_id è consentito solo per progetti root." msgid "Update of domain_id of projects acting as domains is not allowed." msgstr "" "L'aggiornamento di domain_id di progetti che agiscono come domini non è " "consentito." 
msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "" "Utilizzare un token nell'ambito del progetto quando si tenta di creare " "un'asserzione SAML" msgid "" "Use of the identity driver config to automatically configure the same " "assignment driver has been deprecated, in the \"O\" release, the assignment " "driver will need to be expicitly configured if different than the default " "(SQL)." msgstr "" "Utilizzare la configurazione del driver di identità per configurare " "automaticamente la stessa assegnazione. Il driver è obsoleto nella release " "\"O\". Il driver di assegnazione dovrà essere configurato esplicitamente se " "diverso dal driver predefinito (SQL)." #, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "L'utente %(u_id)s non è autorizzato per il tenant %(t_id)s" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "L'utente %(user_id)s non ha accesso al dominio %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "L'utente %(user_id)s non ha accesso al progetto %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "L'utente %(user_id)s è già membro del gruppo %(group_id)s" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "L'utente '%(user_id)s' non è stato trovato nel gruppo '%(group_id)s'" msgid "User IDs do not match" msgstr "Gli ID utente non corrispondono" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "L'autorizzazione utente non può essere creata perché manca l'id utente o il " "nome utente con l'id dominio o il nome utente con il nome dominio. 
" #, python-format msgid "User is disabled: %s" msgstr "L'utente è disabilitato: %s" msgid "User is not a member of the requested project" msgstr "L'utente non è un membro del progetto richiesto" msgid "User is not a trustee." msgstr "L'utente non è un amministratore." msgid "User not found" msgstr "Utente non trovato" msgid "User not valid for tenant." msgstr "Utente non valido per il tenant." msgid "User roles not supported: tenant_id required" msgstr "Ruoli utente non supportati: richiesto tenant_id" #, python-format msgid "User type %s not supported" msgstr "Tipo utente %s non supportato" msgid "You are not authorized to perform the requested action." msgstr "Non si possiede l'autorizzazione per eseguire l'operazione richiesta." #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "L'utente non è autorizzato ad eseguire l'azione richiesta: %(action)s" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Si è cercato di creare una risorsa utilizzando il token admin. Poiché questo " "token non si trova all'interno di un dominio, è necessario includere " "esplicitamente un dominio per fare in modo che questa risorsa vi appartenga." msgid "`key_mangler` functions must be callable." msgstr "Le funzioni `key_mangler` devono essere disponibili per la chiamata." 
msgid "`key_mangler` option must be a function reference" msgstr "L'opzione `key_mangler` deve essere un riferimento funzione" msgid "any options" msgstr "qualsiasi opzione" msgid "auth_type is not Negotiate" msgstr "auth_type non è Negotiate" msgid "authorizing user does not have role required" msgstr "l'utente per l'autorizzazione non dispone del ruolo richiesto" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "impossibile creare un progetto in un ramo che contiene un progetto " "disabilitato: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "impossibile eliminare un progetto abilitato che agisce come un dominio. " "Disabilitare prima il progetto %s." #, python-format msgid "group %(group)s" msgstr "gruppo %(group)s" msgid "" "idp_contact_type must be one of: [technical, other, support, administrative " "or billing." msgstr "" "idp_contact_type deve essere uno tra: [tecnico, altro, supporto, " "amministrativo o di fatturazione." #, python-format msgid "invalid date format %s" msgstr "formato data non valido %s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "non è consentito avere due progetti che agiscono con lo stesso nome: %s" #, python-format msgid "" "it is not permitted to have two projects within a domain with the same " "name : %s" msgstr "" "non è consentito avere due progetti all'interno di un dominio con lo stesso " "nome: %s" msgid "only root projects are allowed to act as domains." msgstr "Solo ai progetti root è consentito agire come domini." 
#, python-format msgid "option %(option)s in group %(group)s" msgstr "opzione %(option)s nel gruppo %(group)s" msgid "provided consumer key does not match stored consumer key" msgstr "" "La chiave consumer fornita non corrisponde alla chiave consumer memorizzata" msgid "provided request key does not match stored request key" msgstr "" "La chiave della richiesta fornita non corrisponde alla chiave della " "richiesta memorizzata" msgid "provided verifier does not match stored verifier" msgstr "il verificatore fornito non corrisponde al verificatore memorizzato" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses deve essere un numero intero positivo o nullo." msgid "remaining_uses must not be set if redelegation is allowed" msgstr "" "remaining_uses non deve essere impostato se è consentita la riassegnazione " "della delega" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "Richiesta di aggiornamento del gruppo %(group)s, ma la configurazione " "fornita contiene il gruppo %(group_other)s" msgid "rescope a scoped token" msgstr "riassegna ambito a token con ambito" #, python-format msgid "role %s is not defined" msgstr "il ruolo %s non è definito" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "scope.project.id deve essere specificato se è specificato anche " "include_subtree" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "Impossibile trovare tls_cacertdir %s o non è una directory" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "Impossibile trovare tls_cacertfile %s o non è un file" #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "" "il riferimento al token deve essere un tipo KeystoneToken, ottenuto: %s" msgid "" "update of domain_id is deprecated as of Mitaka and will be removed in O." 
msgstr "" "l'aggiornamento di domain_id è obsoleto a partire da Mitaka e verrà rimosso " "in O." #, python-format msgid "" "validated expected to find %(param_name)r in function signature for " "%(func_name)r." msgstr "" "la convalida prevede di trovare %(param_name)r nella firma funzione per " "%(func_name)r." keystone-9.0.0/keystone/locale/de/0000775000567000056710000000000012701407246020215 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/de/LC_MESSAGES/0000775000567000056710000000000012701407246022002 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000155012701407102026542 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: German\n" #, python-format msgid "Unable to open template file %s" msgstr "Vorlagendatei %s kann nicht geöffnet werden" keystone-9.0.0/keystone/locale/de/LC_MESSAGES/keystone.po0000664000567000056710000016173312701407105024210 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Ettore Atalan , 2014 # Robert Simai, 2014 # Reik Keutterling , 2015 # Frank Kloeker , 2016. #zanata # Monika Wolf , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev8\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-24 10:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 03:13+0000\n" "Last-Translator: Monika Wolf \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: German\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "%(driver)s is not supported driver version" msgstr "%(driver)s ist keine unterstützte Treiberversion." #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "" "Der %(entity)s-Name darf nicht die folgenden reservierten Zeichen enthalten: " "%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s ist kein gültiges Benachrichtigungsereignis; erforderlich ist " "%(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s ist kein vertrauenswürdiger Dashboard-Host" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s bietet keine Datenbankmigrationen. Der Migrations-Repository-" "Pfad unter %(path)s ist nicht vorhanden oder ist kein Verzeichnis." #, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s" msgstr "%(prior_role_id)s impliziert nicht %(implied_role_id)s" #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." msgstr "%(property_name)s darf nicht kleiner als %(min_length)s Zeichen sein." 
#, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "%(property_name)s ist nicht %(display_expected_type)s" #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." msgstr "%(property_name)s sollte nicht größer als %(max_length)s Zeichen sein." #, python-format msgid "%(role_id)s cannot be an implied roles" msgstr "%(role_id)s darf keine implizierte Rolle sein" #, python-format msgid "%s cannot be empty." msgstr "%s darf nicht leer sein." #, python-format msgid "%s extension does not exist." msgstr "Erweiterung %s ist nicht vorhanden." #, python-format msgid "%s field is required and cannot be empty" msgstr "%s-Feld ist erforderlich und darf nicht leer sein" #, python-format msgid "%s field(s) cannot be empty" msgstr "%s-Felder können nicht leer sein" #, python-format msgid "" "%s for the LDAP identity backend has been deprecated in the Mitaka release " "in favor of read-only identity LDAP access. It will be removed in the \"O\" " "release." msgstr "" "%s für das LDAP-ID-Back-End wurde in Mitaka zugunsten des schreibgeschützten " "ID-LDAP-Zugriffs eingestellt und wird im \"O\"-Release entfernt." msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(Modus insecure_debug inaktivieren, um diese Details zu unterdrücken.)" msgid "--all option cannot be mixed with other options" msgstr "--all-Option kann nicht zusammen mit anderen Optionen verwendet werden" msgid "A project-scoped token is required to produce a service catalog." msgstr "" "Ein projektorientiertes Token ist zum Produzieren eines Dienstekatalogs " "erforderlich." msgid "Access token is expired" msgstr "Zugriffstoken ist abgelaufen" msgid "Access token not found" msgstr "Zugriffstoken nicht gefunden" msgid "Additional authentications steps required." msgstr "Zusätzliche Authentifizierungsschritte sind notwendig." 
msgid "An unexpected error occurred when retrieving domain configs" msgstr "" "Beim Abrufen der Domänenkonfigurationen ist ein unerwarteter Fehler " "aufgetreten" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "Beim Versuch, %s zu speichern, ist ein unerwarteter Fehler aufgetreten" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "" "Wegen eines unerwarteten Fehlers konnte der Server Ihre Anforderung nicht " "ausführen." #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "" "Wegen eines unerwarteten Fehlers konnte der Server Ihre Anforderung nicht " "ausführen: %(exception)s" msgid "An unhandled exception has occurred: Could not find metadata." msgstr "" "Eine nicht behandelte Ausnahme ist aufgetreten: Metadaten konnten nicht " "gefunden werden." msgid "At least one option must be provided" msgstr "Mindestens eine Option muss angegeben werden" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "Mindestens eine Option muss angegeben werden. Verwenden Sie entweder --all " "oder --domain-name" msgid "At least one role should be specified." msgstr "Mindestens eine Rolle sollte angegeben werden." #, python-format msgid "" "Attempted automatic driver selection for assignment based upon " "[identity]\\driver option failed since driver %s is not found. Set " "[assignment]/driver to a valid driver in keystone config." msgstr "" "Der Versuch, für die Zuordnung den Treiber basierend auf der Option " "[identity]\\driver automatisch auszuwählen, ist fehlgeschlagen, da der " "Treiber %s nicht gefunden wurde. Setzen Sie die Option [assignment]/driver " "in der Keystone-Konfiguration auf einen gültigen Treiber." msgid "Attempted to authenticate with an unsupported method." msgstr "Versuch einer Authentifizierung mit einer nicht unterstützten Methode." 
msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "" "Versuch, OS-FEDERATION-Token mit V2 Identity Service zu verwenden, verwenden " "Sie v3- Authentifizierung" msgid "Authentication plugin error." msgstr "Authentifizierung-Plugin-Fehler" #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "" "Back-End '%(backend)s' ist kein gültiges memcached Back-End. Gültige Back-" "Ends: %(backend_list)s" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "" "Anforderungstoken kann mit einem per Delegierung ausgegebenen Token nicht " "autorisiert werden." #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "%(option_name)s %(attr)s kann nicht geändert werden" msgid "Cannot change Domain ID" msgstr "Die Domänen-ID kann nicht geändert werden" msgid "Cannot change user ID" msgstr "Benutzer-ID kann nicht geändert werden" msgid "Cannot change user name" msgstr "Benutzername kann nicht geändert werden" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "" "Es kann kein Endpunkt mit einer ungültigen URL erstellt werden: %(url)s" #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "" "Projekt kann nicht mit dem übergeordneten Element %(project_id)s erstellt " "werden" #, python-format msgid "" "Cannot create project, since it specifies its owner as domain %(domain_id)s, " "but specifies a parent in a different domain (%(parent_domain_id)s)." msgstr "" "Das Projekt kann nicht erstellt werden, da es den zugehörigen Eigner als " "Domäne %(domain_id)s angibt, jedoch ein übergeordnetes Projekt in einer " "anderen Domäne (%(parent_domain_id)s) angibt." #, python-format msgid "" "Cannot create project, since its parent (%(domain_id)s) is acting as a " "domain, but project's specified parent_id (%(parent_id)s) does not match " "this domain_id." 
msgstr "" "Das Projekt kann nicht erstellt werden, da das zugehörige übergeordnete " "Projekt (%(domain_id)s) als Domäne fungiert, aber die für das Projekt " "angegebene 'parent_id' (%(parent_id)s) nicht mit dieser 'domain_id' " "übereinstimmt." msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "Eine aktivierte Domäne kann nicht gelöscht werden. Deaktivieren Sie sie " "zuerst." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Kann Projekt %(project_id)s nicht löschen, da die zugehörige untergeordnete " "Baumstruktur aktivierte Projekte enthält." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "Das Projekt %s kann nicht gelöscht werden, da es kein Blattelement in der " "Hierarchie darstellt. Verwenden Sie die Option 'cascade', wenn Sie eine " "vollständige, untergeordnete Baumstruktur löschen möchten. " #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Kann Projekt %(project_id)s nicht deaktivieren, da die zugehörige " "untergeordnete Baumstruktur aktivierte Projekte enthält." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "" "Kann Projekt %s nicht aktivieren, da es über inaktivierte übergeordnete " "Projekte verfügt" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "Aus Gruppen erstellte und nach Benutzer-ID gefilterte Zuordnungen können " "nicht aufgelistet werden." msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "Anforderungstokens können mit einem per Delegierung ausgegebenen Token nicht " "aufgelistet werden." #, python-format msgid "Cannot open certificate %(cert_file)s. 
Reason: %(reason)s" msgstr "" "Zertifikat %(cert_file)s kann nicht geöffnet werden. Ursache: %(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Nicht gewährte Rolle kann nicht entfernt werden, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "Abschneiden eines Treiberaufrufs ohne Hinweisliste als erstem Parameter nach " "dem Treiber nicht möglich " msgid "Cannot update domain_id of a project that has children." msgstr "" "Die Aktualisierung von 'domain_id' eines Projekts mit untergeordneten " "Projekten ist nicht möglich." msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "Die Abfrageparameter parents_as_list und parents_as_ids können nicht " "gleichzeitig verwendet werden." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "Die Abfrageparameter subtree_as_list und subtree_as_ids können nicht " "gleichzeitig verwendet werden." msgid "Cascade update is only allowed for enabled attribute." msgstr "" "Die Aktualisierungsweitergabe ist nur für aktivierte Attribute zulässig." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "Die Kombination von effektivem Filter und Gruppenfilter führt immer zu einer " "leeren Liste." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "Die Kombination von effektivem Filter, Domänenfilter und vererbten Filtern " "führt immer zu einer leeren Liste." 
#, python-format msgid "Config API entity at /domains/%s/config" msgstr "Konfigurations-API-Entität unter /domains/%s/config" #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "Konflikt beim Versuch, %(type)s zu speichern - %(details)s" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "Angabe von Regions-IDs, die miteinander im Konflikt stehen: \"%(url_id)s\" !" "= \"%(ref_id)s\"" msgid "Consumer not found" msgstr "Kunde nicht gefunden" #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target %(target)s" msgstr "" "Unveränderliche Attribute '%(attributes)s' konnten nicht geändert werden in " "Ziel %(target)s" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "Identitätsprovider-ID nicht gefunden. Die Konfigurationsoption " "%(issuer_attribute)s wurde in der Anforderungsumgebung nicht gefunden." 
#, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "" "%(group_or_option)s konnte in der Domänenkonfiguration für Domäne " "%(domain_id)s nicht gefunden werden" #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "Endpunktgruppe konnte nicht gefunden werden: %(endpoint_group_id)s" msgid "Could not find Identity Provider identifier in environment" msgstr "Identitätsprovider-ID konnte in der Umgebung nicht gefunden werden" #, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "Identitätsprovider %(idp_id)s konnte nicht gefunden werden" #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "Service-Provider %(sp_id)s konnte nicht gefunden werden" #, python-format msgid "Could not find credential: %(credential_id)s" msgstr "Berechtigungsnachweis %(credential_id)s konnte nicht gefunden werden" #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "Domäne %(domain_id)s konnte nicht gefunden werden" #, python-format msgid "Could not find endpoint: %(endpoint_id)s" msgstr "Endpunkt %(endpoint_id)s konnte nicht gefunden werden" #, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "" "Föderiertes Protokoll %(protocol_id)s konnte nicht gefunden werden für " "Identitätsprovider: %(idp_id)s" #, python-format msgid "Could not find group: %(group_id)s" msgstr "Gruppe %(group_id)s konnte nicht gefunden werden" #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "Zuordnung %(mapping_id)s konnte nicht gefunden werden" msgid "Could not find policy association" msgstr "Richtlinienzuordnung konnte nicht gefunden werden" #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "Richtlinie %(policy_id)s konnte nicht gefunden werden" #, python-format msgid "Could not find project: %(project_id)s" msgstr "Projekt %(project_id)s 
konnte nicht gefunden werden" #, python-format msgid "Could not find region: %(region_id)s" msgstr "Region %(region_id)s konnte nicht gefunden werden" #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" "Rollenzuordnung mit Rolle: %(role_id)s, Benutzer oder Gruppe: %(actor_id)s, " "Projekt oder Domäne: %(target_id)s, konnte nicht gefunden werden" #, python-format msgid "Could not find role: %(role_id)s" msgstr "Rolle %(role_id)s konnte nicht gefunden werden" #, python-format msgid "Could not find service: %(service_id)s" msgstr "Dienst %(service_id)s konnte nicht gefunden werden" #, python-format msgid "Could not find token: %(token_id)s" msgstr "Token %(token_id)s konnte nicht gefunden werden" #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "Vertrauensbeziehung %(trust_id)s konnte nicht gefunden werden" #, python-format msgid "Could not find user: %(user_id)s" msgstr "Benutzer %(user_id)s konnte nicht gefunden werden" #, python-format msgid "Could not find version: %(version)s" msgstr "Version %(version)s konnte nicht gefunden werden" #, python-format msgid "Could not find: %(target)s" msgstr "Konnte nicht gefunden werden: %(target)s" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "Es konnten keine eingebundenen Benutzereigenschaften Identitätswerten " "zugeordnet werden. Überprüfen Sie die Debugprotokolle oder die verwendete " "Zuordnung, um weitere Details zu erhalten." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "Benutzer konnte beim Festlegen der ephemeren Benutzeridentität nicht " "zugeordnet werden. 
Entweder muss in Zuordnungsregeln Benutzer-ID/Name " "angegeben werden oder Umgebungsvariable REMOTE_USER muss festgelegt werden." msgid "Could not validate the access token" msgstr "Das Zugriffstoken konnte nicht geprüft werden" msgid "Credential belongs to another user" msgstr "Berechtigungsnachweis gehört einem anderen Benutzer" msgid "Credential signature mismatch" msgstr "Übereinstimmungsfehler bei Berechtigungssignatur" #, python-format msgid "" "Direct import of auth plugin %(name)r is deprecated as of Liberty in favor " "of its entrypoint from %(namespace)r and may be removed in N." msgstr "" "Der direkte Import des Authentifizierungsplugins %(name)r wird zugunsten des " "zugehörigen Einstiegspunkts aus %(namespace)r seit Liberty nicht mehr " "unterstützt und wird möglicherweise im N-Release entfernt." #, python-format msgid "" "Direct import of driver %(name)r is deprecated as of Liberty in favor of its " "entrypoint from %(namespace)r and may be removed in N." msgstr "" "Der direkte Import des Treibers %(name)r wird zugunsten des zugehörigen " "Einstiegspunkts aus %(namespace)r seit Liberty nicht mehr unterstützt und " "wird möglicherweise im N-Release entfernt." msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "Eine Entität inaktivieren, in der das Attribut 'enable' ignoriert wird von " "der Konfiguration." #, python-format msgid "Domain (%s)" msgstr "Domain (%s)" #, python-format msgid "Domain cannot be named %s" msgstr "Domäne kann nicht mit %s benannt werden" #, python-format msgid "Domain cannot have ID %s" msgstr "Domäne kann nicht die ID %s haben" #, python-format msgid "Domain is disabled: %s" msgstr "Domäne ist inaktiviert: %s" msgid "Domain name cannot contain reserved characters." msgstr "Der Domänenname darf keine reservierten Zeichen enthalten." 
msgid "Domain scoped token is not supported" msgstr "Bereichsorientiertes Token der Domäne wird nicht unterstützt" msgid "Domain specific roles are not supported in the V8 role driver" msgstr "" "Domänenspezifische rollen werden im V8-Rollentreiber nicht unterstützt." #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "Domäne: für %(domain)s ist bereits eine Konfiguration definiert - Datei wird " "ignoriert: %(file)s." msgid "Duplicate Entry" msgstr "Doppelter Eintrag" #, python-format msgid "Duplicate ID, %s." msgstr "Doppelte ID, %s." #, python-format msgid "Duplicate entry: %s" msgstr "Doppelter Eintrag: %s" #, python-format msgid "Duplicate name, %s." msgstr "Doppelter Name, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "Doppelte ferne ID: %s" msgid "EC2 access key not found." msgstr "EC2 Zugriffsschlüssel nicht gefunden." msgid "EC2 signature not supplied." msgstr "EC2-Signatur nicht angegeben." msgid "" "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set." msgstr "" "Es muss entweder das Argument --bootstrap-password oder " "OS_BOOTSTRAP_PASSWORD gesetzt werden." msgid "Enabled field must be a boolean" msgstr "Das Feld 'Aktiviert' muss ein boolescher Wert sein" msgid "Enabled field should be a boolean" msgstr "Das Feld 'Aktiviert' sollte ein boolescher Wert sein" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Endpunkt %(endpoint_id)s nicht gefunden in Projekt %(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "Projektzuordnung für Endpunktgruppe nicht gefunden" msgid "Ensure configuration option idp_entity_id is set." msgstr "" "Stellen Sie sicher, dass die Konfigurationsoption idp_entity_id gesetzt ist. " msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "" "Stellen Sie sicher, dass die Konfigurationsoption idp_sso_endpoint gesetzt " "ist. 
" #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Fehler bei der Auswertung der Konfigurationsdatei für Domäne: %(domain)s, " "Datei: %(file)s." #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Fehler beim Öffnen der Datei %(path)s: %(err)s" #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "Fehler beim Parsing der Zeile '%(line)s': %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Fehler beim Parsing der Regeln %(path)s: %(err)s" #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "Fehler beim Lesen der Metadatendatei, %(reason)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Die maximal zulässige Anzahl an Versuchen, die Domäne %(domain)s für die " "Verwendung des SQL-Treibers zu registrieren, wurde überschritten. Die letzte " "Domäne, bei der die Registrierung erfolgreich gewesen zu sein scheint, war " "%(last_domain)s. Abbruch." #, python-format msgid "Expected dict or list: %s" msgstr "Verzeichnis oder Liste erwartet: %s" msgid "" "Expected signing certificates are not available on the server. Please check " "Keystone configuration." msgstr "" "Erwartete Signierzertifikate sind auf dem Server nicht verfügbar. Überprüfen " "Sie die Keystone-Konfiguration." #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "Es wurde erwartet, %(attribute)s in %(target)s zu finden. Der Server konnte " "die Anforderung nicht erfüllen, da ein fehlerhaftes Format oder ein anderer " "Fehler vorliegt. Es wird angenommen, dass der Fehler beim Client liegt." 
#, python-format msgid "Failed to start the %(name)s server" msgstr "Fehler beim Starten des %(name)s-Servers" msgid "Failed to validate token" msgstr "Token konnte nicht geprüft werden" msgid "Federation token is expired" msgstr "Föderationstoken ist abgelaufen" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "Feld \"remaining_uses\" ist auf %(value)s festgelegt, es darf jedoch nicht " "festgelegt werden, um eine Vertrauensbeziehung zu übertragen" msgid "Found invalid token: scoped to both project and domain." msgstr "" "Ungültiges Token gefunden. Es ist sowohl projekt- als auch domänenorientiert." #, python-format msgid "Group %s not found in config" msgstr "Die Gruppe %s wurde nicht in der Konfiguration gefunden." #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "Gruppe %(group)s wird für domänenspezifische Konfigurationen nicht " "unterstützt" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "Die von der Zuordnung %(mapping_id)s zurückgegebene Gruppe %(group_id)s " "konnte im Back-End nicht gefunden werden." #, python-format msgid "" "Group membership across backend boundaries is not allowed, group in question " "is %(group_id)s, user is %(user_id)s" msgstr "" "Back-End-übergreifende Gruppenmitgliedschaft ist nicht zulässig, betroffene " "Gruppe ist %(group_id)s, Benutzer ist %(user_id)s" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "ID-Attribut %(id_attr)s wurde in LDAP-Objekt %(dn)s nicht gefunden" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "Identitätsprovider %(idp)s ist inaktiviert" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" "Eingehende Identitätsprovider-ID ist nicht in den akzeptierten IDs enthalten." 
msgid "Invalid EC2 signature." msgstr "Ungültige EC2-Signatur." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "Ungültige LDAP-TLS-Zertifikatsoption: %(option)s. Wählen Sie aus: %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Ungültige LDAP TLS_AVAIL Option: %s. TLS nicht verfügbar" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "Ungültige LDAP-TLS-deref-Option: %(option)s. Wählen Sie aus: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "Ungültiger LDAP Bereich: %(scope)s. Wählen Sie aus: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Ungültige TLS /LDAPS Kombination" #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "Ungültiger Datentyp für Prüfungsinformationen: %(data)s (%(type)s)" msgid "Invalid blob in credential" msgstr "Ungültiges Blob-Objekt im Berechtigungsnachweis" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Ungültiger Domänenname: %(domain)s im Konfigurationsdateinamen gefunden: " "%(file)s - diese Datei wird ignoriert." #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "Ungültige domänenspezifische Konfiguration: %(reason)s" #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "Ungültige Eingabe für Feld '%(path)s'. Der Wert lautet '%(value)s'." msgid "Invalid limit value" msgstr "Ungültiger Grenzwert" #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service or " "Region+Service allowed. 
Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s" msgstr "" "Ungültige Mischung von Entitäten für Richtlinienzuordnung - nur Endpunkt, " "Dienst oder Region+Dienst zulässig. Anforderung war - Endpunkt: " "%(endpoint_id)s, Service: %(service_id)s, Region: %(region_id)s" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Ungültige Regel: %(identity_value)s. Die Suchbegriffe 'groups' und 'domain' " "müssen angegeben sein." msgid "Invalid signature" msgstr "Ungültige Signatur" msgid "Invalid user / password" msgstr "Ungültiger Benutzer / Passwort" msgid "Invalid username or TOTP passcode" msgstr "Ungültiger Benutzername oder TOTP-Kenncode" msgid "Invalid username or password" msgstr "Ungültiger Benutzername oder ungültiges Passwort." #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." msgstr "KVS-Region %s ist bereits konfiguriert. Rekonfiguration nicht möglich." #, python-format msgid "Key Value Store not configured: %s" msgstr "Schlüsselwertspeicher nicht konfiguriert: %s" #, python-format msgid "LDAP %s create" msgstr "LDAP %s erstellen" #, python-format msgid "LDAP %s delete" msgstr "LDAP %s löschen" #, python-format msgid "LDAP %s update" msgstr "LDAP %s aktualisieren" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "Länge der transformierbaren Ressourcen-ID liegt über der maximal zulässigen " "Anzahl von 64 Zeichen. " #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "Der lokale Abschnitt in der Zuordnung %(mapping_id)s bezieht sich auf eine " "ferne Übereinstimmung, die nicht vorhanden ist (z. B. '{0}' in einem lokalen " "Abschnitt)." 
#, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "Überschreitung der Sperrzeit aufgetreten für Schlüssel %(target)s" #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "" "Sperrschlüssel muss mit Zielschlüssel übereinstimmen: %(lock)s != %(target)s" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "Fehlerhafte Endpunkt-URL (%(endpoint)s), siehe Details im FEHLER-Protokoll. " msgid "Marker could not be found" msgstr "Marker konnte nicht gefunden werden" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "Die maximale Hierarchietiefe für den %s-Branch wurde erreicht." #, python-format msgid "Maximum lock attempts on %s occurred." msgstr "Maximale Anzahl an Sperrversuchen auf %s erfolgt." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "Mitglied %(member)s ist bereits Mitglied der Gruppe %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Methode kann nicht aufgerufen werden: %s" msgid "Missing entity ID from environment" msgstr "Fehlende Entitäts-ID von Umgebung" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "Das Ändern von \"redelegation_count\" ist bei der Redelegation nicht " "zulässig. Es wird empfohlen, diesen Parameter auszulassen." msgid "Multiple domains are not supported" msgstr "Mehrere Domänen werden nicht unterstützt" msgid "Must be called within an active lock context." msgstr "Aufruf innerhalb des Kontexts einer aktiven Sperre erforderlich." msgid "Must specify either domain or project" msgstr "Entweder Domäne oder Projekt muss angegeben werden" msgid "Name field is required and cannot be empty" msgstr "Namensfeld ist erforderlich und darf nicht leer sein" msgid "Neither Project Domain ID nor Project Domain Name was provided." 
msgstr "Weder Projektdomänen-ID noch Projektdomänenname wurde angegeben." msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, if " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "Keine Autorisierungskopfzeilen gefunden, zu OAuth zugehörige Aufrufe können " "nicht fortgesetzt werden. Stellen Sie bei Ausführung unter HTTPd oder Apache " "sicher, dass WSGIPassAuthorization auf 'On' gesetzt ist." msgid "No authenticated user" msgstr "Kein authentifizierter Benutzer" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "Keine Chiffrierschlüssel gefunden; führen Sie keystone-manage fernet_setup " "aus, um über Bootstrapping einen Schlüssel zu erhalten." msgid "No options specified" msgstr "Keine Optionen angegeben" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "Endpunkt %(endpoint_id)s ist keine Richtlinie zugeordnet. " #, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "Keine verbleibende Verwendung für Vertrauensbeziehung %(trust_id)s" msgid "No token in the request" msgstr "Kein Token in der Anforderung" msgid "Non-default domain is not supported" msgstr "Nicht-Standard-Domäne wird nicht unterstützt" msgid "One of the trust agents is disabled or deleted" msgstr "Einer der Vertrauensagenten wurde deaktiviert oder gelöscht" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "Option %(option)s ohne angegebene Gruppe gefunden, während die Domänen- " "Konfigurationsanforderung geprüft wurde" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "Option %(option)s in Gruppe %(group)s wird für domänenspezifische " "Konfigurationen nicht unterstützt" #, python-format msgid "Project (%s)" msgstr "Projekt (%s)" #, python-format msgid "Project ID 
not found: %(t_id)s" msgstr "Projekt-ID nicht gefunden: %(t_id)s" msgid "Project field is required and cannot be empty." msgstr "Projektfeld ist erforderlich und darf nicht leer sein." #, python-format msgid "Project is disabled: %s" msgstr "Projekt ist inaktiviert: %s" msgid "Project name cannot contain reserved characters." msgstr "Der Projektname darf keine reservierten Zeichen enthalten." msgid "Query string is not UTF-8 encoded" msgstr "Abfragezeichenfolge ist nicht UTF-8-codiert" #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "Lesen des Standardwerts für die Option %(option)s in der Gruppe %(group)s " "wird nicht unterstützt." msgid "Redelegation allowed for delegated by trust only" msgstr "Redelegation nur zulässig für im Vertrauen redelegierte" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "Verbleibende Redelegationstiefe von %(redelegation_depth)d aus dem " "zulässigen Bereich von [0..%(max_count)d]" msgid "" "Remove admin_crud_extension from the paste pipeline, the admin_crud " "extension is now always available. Updatethe [pipeline:admin_api] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "Entfernen Sie 'admin_crud_extension' aus der Einfügepipeline. " "'admin_crud_extension' ist jetzt immer verfügbar. Aktualisieren Sie den " "Abschnitt [pipeline:admin_api] in der Datei 'keystone-paste.ini' " "entsprechend, da er im 'O'-Release entfernt wird. " msgid "" "Remove endpoint_filter_extension from the paste pipeline, the endpoint " "filter extension is now always available. Update the [pipeline:api_v3] " "section in keystone-paste.ini accordingly as it will be removed in the O " "release." msgstr "" "Entfernen Sie 'endpoint_filter_extension' aus der Einfügepipeline. Die " "Endpunktfiltererweiterung ist jetzt immer verfügbar. 
Aktualisieren Sie den " "Abschnitt [pipeline:api_v3] in der Datei 'keystone-paste.ini' entsprechend, " "da er im 'O'-Release entfernt wird." msgid "" "Remove federation_extension from the paste pipeline, the federation " "extension is now always available. Update the [pipeline:api_v3] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "Entfernen Sie 'federation_extension' aus der Einfügepipeline. Sie ist jetzt " "immer verfügbar. Aktualisieren Sie den Abschnitt [pipeline:api_v3] in der " "Datei 'keystone-paste.ini' entsprechend, da er im 'O'-Release entfernt wird." msgid "" "Remove oauth1_extension from the paste pipeline, the oauth1 extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Entfernen Sie 'oauth1_extension' aus der Einfügepipeline. Die oauth1-" "Erweiterung ist jetzt immer verfügbar. Aktualisieren Sie den Abschnitt " "[pipeline:api_v3] in der Datei 'keystone-paste.ini' entsprechend, da er im " "'O'-Release entfernt wird." msgid "" "Remove revoke_extension from the paste pipeline, the revoke extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Entfernen Sie 'revoke_extension' aus der Einfügepipeline. Die revoke-" "Erweiterung ist jetzt immer verfügbar. Aktualisieren Sie den Abschnitt " "[pipeline:api_v3] in der Datei 'keystone-paste.ini' entsprechend, da er im " "'O'-Release entfernt wird. " msgid "" "Remove simple_cert from the paste pipeline, the PKI and PKIz token providers " "are now deprecated and simple_cert was only used insupport of these token " "providers. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Entfernen Sie 'simple_cert' aus der Einfügepipeline. 
Die PKI- und PKIz-Token-" "Provider sind jetzt veraltet und 'simple_cert' wurde nur zur Unterstützung " "dieser Token-Provider verwendet. Aktualisieren Sie den Abschnitt [pipeline:" "api_v3] in der Datei 'keystone-paste.ini' entsprechend, da er im 'O'-Release " "entfernt wird." msgid "" "Remove user_crud_extension from the paste pipeline, the user_crud extension " "is now always available. Updatethe [pipeline:public_api] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." msgstr "" "Entfernen Sie 'user_crud_extension' aus der Einfügepipeline. 'user_crud " "extension' ist jetzt immer verfügbar. Aktualisieren Sie den Abschnitt " "[pipeline:public_api] in der Datei 'keystone-paste.ini' entsprechend, da er " "im 'O'-Release entfernt wird." msgid "Request Token does not have an authorizing user id" msgstr "Anforderungstoken weist keine autorisierte Benutzer-ID auf" #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. The " "server could not comply with the request because the attribute size is " "invalid (too large). The client is assumed to be in error." msgstr "" "Anforderungsattribut %(attribute)s muss kleiner-gleich %(size)i sein. Der " "Server konnte die Anforderung nicht erfüllen, da die Attributgröße ungültig " "ist (zu groß). Es wird angenommen, dass der Fehler beim Client liegt." 
msgid "Request must have an origin query parameter" msgstr "Anforderung muss über einen ursprünglichen Abfrageparameter verfügen" msgid "Request token is expired" msgstr "Anforderungstoken ist abgelaufen" msgid "Request token not found" msgstr "Anforderungstoken nicht gefunden" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "Angeforderte Ablaufzeit übersteigt die, die von der redelegierten " "Vertrauensbeziehung bereitgestellt werden kann" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "Die angeforderte Redelegationstiefe von %(requested_count)d übersteigt den " "zulässigen Wert von %(max_count)d" msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of running " "in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will " "be removed in the \"M\"-Release." msgstr "" "Die Ausführung von Keystone über eventlet ist seit Kilo veraltet. " "Stattdessen wird ein WSGI-Server (z. B. mod_wsgi) für die Ausführung " "verwendet. Unterstützung für Keystone unter eventlet wird im \"M\"-Release " "entfernt." 
msgid "Scoping to both domain and project is not allowed" msgstr "Scoping sowohl auf 'domain' als auch auf 'project' ist nicht zulässig" msgid "Scoping to both domain and trust is not allowed" msgstr "Scoping sowohl auf 'domain' als auch auf 'trust' ist nicht zulässig" msgid "Scoping to both project and trust is not allowed" msgstr "Scoping sowohl auf 'project' als auch auf 'trust' ist nicht zulässig" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "Service-Provider %(sp)s ist inaktiviert" msgid "Some of requested roles are not in redelegated trust" msgstr "" "Einige angeforderte Rollen befinden sich nicht in einer redelegierten " "Vertrauensbeziehung" msgid "Specify a domain or project, not both" msgstr "Geben Sie eine Domäne oder ein Projekt an, nicht beides" msgid "Specify a user or group, not both" msgstr "Geben Sie einen Benutzer oder eine Gruppe an, nicht beides" msgid "Specify one of domain or project" msgstr "Entweder eine Domäne oder ein Projekt muss angegeben werden" msgid "Specify one of user or group" msgstr "Entweder ein Benutzer oder eine Gruppe muss angegeben werden" #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "Zeichenfolgelänge überschritten. Die Länge der Zeichenfolge '%(string)s' hat " "den Grenzwert von Spalte %(type)s(CHAR(%(length)d)) überschritten." msgid "Tenant name cannot contain reserved characters." msgstr "Der Name des Mandanten darf keine reservierten Zeichen enthalten." #, python-format msgid "" "The %s extension has been moved into keystone core and as such its " "migrations are maintained by the main keystone database control. Use the " "command: keystone-manage db_sync" msgstr "" "Die Erweiterung %s wurde in den Keystone-Kern verschoben. Daher werden die " "zugehörigen Migrationen über die Keystone-Hauptdatenbanksteuerung verwaltet. 
" "Verwenden Sie den Befehl keystone-manage db_sync" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "Die Zeitangabe in 'expires_at' darf nicht vor dem jetzigen Zeitpunkt liegen. " "Der Server konnte der Anforderung nicht nachkommen, da ein fehlerhaftes " "Format oder ein anderer Fehler vorliegt. Es wird angenommen, dass der Fehler " "beim Client liegt." msgid "The --all option cannot be used with the --domain-name option" msgstr "" "Die Option --all kann nicht zusammen mit der Option --domain-name verwendet " "werden" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "" "Die Keystone-Konfigurationsdatei %(config_file)s konnte nicht gefunden " "werden." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "Die domänenspezifische Keystone-Konfiguration hat mehrere SQL-Treiber " "angegeben (nur einer ist zulässig): %(source)s." msgid "The action you have requested has not been implemented." msgstr "Die von Ihnen angeforderte Aktion wurde nicht implementiert." msgid "The authenticated user should match the trustor." msgstr "Der authentifizierte Benutzer sollte dem Trustor entsprechen." msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." msgstr "" "Die Zertifikate, die Sie angefordert haben, sind nicht verfügbar. Es ist " "wahrscheinlich, dass dieser Server keine PKI-Tokens verwendet; andernfalls " "ist dies die Folge einer fehlerhaften Konfiguration." msgid "The configured token provider does not support bind authentication." msgstr "" "Der konfigurierte Token-Anbieter unterstützt die Bindungsauthentifizierung " "nicht." 
msgid "The creation of projects acting as domains is not allowed in v2." msgstr "" "Die Erstellung von Projekten, die als Domänen agieren, ist in v2 nicht " "zulässig." #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "Die Kennwortlänge muss kleiner-gleich %(size)i sein. Der Server konnte die " "Anforderung nicht erfüllen, da das Kennwort ungültig ist." msgid "The request you have made requires authentication." msgstr "Die von Ihnen gestellte Anfrage erfordert eine Authentifizierung." msgid "The resource could not be found." msgstr "Die Ressource konnte nicht gefunden werden." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "Der Aufruf zum Entziehen darf nicht sowohl domain_id als auch project_id " "aufweisen. Dies ist ein Fehler im Keystone-Server. Die aktuelle Anforderung " "wird abgebrochen. " msgid "The service you have requested is no longer available on this server." msgstr "" "Der Dienst, den Sie angefordert haben, ist auf diesem Server nicht mehr " "verfügbar." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "Die angegebene übergeordnete Region %(parent_region_id)s würde eine " "zirkuläre Regionshierarchie erstellen." 
#, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "Der Wert der Gruppe %(group)s, der in der Konfiguration angegeben ist, muss " "ein Verzeichnis mit Optionen sein" msgid "There should not be any non-oauth parameters" msgstr "Es sollten keine non-oauth-Parameter vorhanden sein" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Dies ist keine anerkannte Fernet-Nutzdatenversion: %s" #, python-format msgid "This is not a recognized Fernet token %s" msgstr "Dies ist kein bekanntes Fernet-Token %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "Zeitstempel nicht im erwarteten Format. Der Server konnte der Anforderung " "nicht nachkommen, da ein fehlerhaftes Format oder ein anderer Fehler " "vorliegt. Es wird angenommen, dass der Fehler beim Client liegt." #, python-format msgid "" "To get a more detailed information on this error, re-run this command for " "the specific domain, i.e.: keystone-manage domain_config_upload --domain-" "name %s" msgstr "" "Um ausführliche Informationen zu diesem Fehler zu erhalten, führen Sie " "diesen Befehl für die angegebene Domäne erneut durch: keystone-manage " "domain_config_upload --domain-name %s" msgid "Token belongs to another user" msgstr "Token gehört einem anderen Benutzer" msgid "Token does not belong to specified tenant." msgstr "Token gehört nicht zum angegebenen Mandanten." msgid "Token version is unrecognizable or unsupported." msgstr "Tokenversion ist nicht erkennbar oder wird nicht unterstützt." msgid "Trustee has no delegated roles." msgstr "Trustee hat keine beauftragten Rollen." msgid "Trustor is disabled." msgstr "Trustor ist deaktiviert." 
#, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Es wird versucht, Gruppe %(group)s zu aktualisieren, damit nur diese Gruppe " "in der Konfiguration angegeben werden muss" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Es wird versucht, Option %(option)s in Gruppe %(group)s zu aktualisieren, " "die angegebene Konfiguration enthält jedoch stattdessen Option " "%(option_other)s" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Es wird versucht, Option %(option)s in Gruppe %(group)s zu aktualisieren, " "damit nur diese Option in der Konfiguration angegeben werden muss" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "Auf die Keystone-Datenbank kann nicht zugegriffen werden, überprüfen Sie, ob " "sie ordnungsgemäß konfiguriert ist. " #, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." msgstr "" "Vertrauensbeziehung %(trust_id)s kann nicht verarbeitet werden, Sperre kann " "nicht angefordert werden." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Region %(region_id)s kann nicht gelöscht werden, da sie oder ihre " "untergeordneten Regionen über zugeordnete Endpunkte verfügen. " msgid "Unable to downgrade schema" msgstr "Das Schema konnte nicht herabgestuft werden." 
#, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "" "Beim Verwenden der Zuordnung %(mapping_id)s können keine gültigen Gruppen " "gefunden werden" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "Domänenkonfigurationsverzeichnis wurde nicht gefunden: %s" #, python-format msgid "Unable to lookup user %s" msgstr "Suche nach Benutzer %s nicht möglich" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Identitätsattribut %(attribute)s kann nicht abgeglichen werden, da es die " "kollidierenden Werte %(new)s und %(old)s aufweist" #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed, or this is the result of misconfiguration. Reason " "%(reason)s" msgstr "" "SAML-Zusicherung kann nicht signiert werden. Wahrscheinlich ist auf dem " "Server xmlsec1 nicht installiert oder dies liegt an einer fehlerhaften " "Konfiguration. Ursache: %(reason)s" msgid "Unable to sign token." msgstr "Token kann nicht unterzeichnet werden." 
#, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Unerwarteter Zuordnungstyp: %s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" "Unerwartete Kombination von Grant-Attributen - Benutzer: %(user_id)s, " "Gruppe: %(group_id)s, Projekt: %(project_id)s, Domäne: %(domain_id)s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Unerwarteter Status für JSON-Home-Antwort angefordert, %s" msgid "Unknown Target" msgstr "Unbekanntes Ziel" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "Unbekannte Domäne '%(name)s' angegeben durch --domain-name" #, python-format msgid "Unknown token version %s" msgstr "Unbekannte Tokenversion %s" #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "Nicht registrierte Abhängigkeit: %(name)s für %(targets)s" msgid "Update of `domain_id` is not allowed." msgstr "Das Aktualisieren von `domain_id` ist nicht zulässig. " msgid "Update of `is_domain` is not allowed." msgstr "Das Aktualisieren von 'is_domain' ist nicht zulässig." msgid "Update of `parent_id` is not allowed." msgstr "Das Aktualisieren von 'parent_id' ist nicht zulässig." msgid "Update of domain_id is only allowed for root projects." msgstr "Die Aktualisierung von 'domain_id' ist nur für Rootprojekte zulässig." msgid "Update of domain_id of projects acting as domains is not allowed." msgstr "" "Es ist nicht zulässig, die 'domain_id' von Projekten zu aktualisieren, die " "als Domänen agieren." 
msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "" "Verwenden Sie ein Projektumfangstoken, wenn Sie versuchen, eine SAML-" "Zusicherung zu erstellen" msgid "" "Use of the identity driver config to automatically configure the same " "assignment driver has been deprecated, in the \"O\" release, the assignment " "driver will need to be expicitly configured if different than the default " "(SQL)." msgstr "" "Die Verwendung der Identitätstreiberkonfiguration für die automatische " "Konfiguration desselben Zuordnungstreibers ist veraltet. Der " "Zuordnungstreiber muss im \"O\"-Release explizit konfiguriert werden, wenn " "er sich vom Standardtreiber (SQL) unterscheidet." #, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "Benutzer %(u_id)s ist nicht berechtigt für Mandanten %(t_id)s" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "Benutzer %(user_id)s hat keinen Zugriff auf Domäne %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "Benutzer %(user_id)s hat keinen Zugriff auf Projekt %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "Benutzer %(user_id)s ist bereits Mitglied der Gruppe %(group_id)s." #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "Benutzer '%(user_id)s' nicht gefunden in Gruppe '%(group_id)s'" msgid "User IDs do not match" msgstr "Benutzerkennungen stimmen nicht überein" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "Benutzerauthentifizierung kann nicht erstellt werden, da entweder Benutzer-" "ID oder Benutzername mit Domänen-ID oder Benutzername mit Domänenname fehlt." 
#, python-format msgid "User is disabled: %s" msgstr "Benutzer ist deaktiviert: %s" msgid "User is not a member of the requested project" msgstr "Benutzer ist kein Mitglied des angeforderten Projekts" msgid "User is not a trustee." msgstr "Benutzer ist kein Trustee." msgid "User not found" msgstr "Benutzer nicht gefunden" msgid "User not valid for tenant." msgstr "Benutzer nicht gültig für Mandant." msgid "User roles not supported: tenant_id required" msgstr "Benutzerrollen nicht unterstützt: tenant_id erforderlich" #, python-format msgid "User type %s not supported" msgstr "Benutzertyp %s nicht unterstützt" msgid "You are not authorized to perform the requested action." msgstr "" "Sie sind nicht dazu autorisiert, die angeforderte Aktion durchzuführen." #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "" "Sie sind nicht berechtigt, die angeforderte Aktion %(action)s auszuführen" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Sie haben versucht, eine Ressource mit dem Admin-Token zu erstellen. Da " "sich dieses Token nicht innerhalb einer Domäne befindet, müssen Sie explizit " "eine Domäne angeben, zu der diese Ressource gehört. " msgid "`key_mangler` functions must be callable." msgstr "`key_mangler`-Funktionen müssen aufrufbar sein." 
msgid "`key_mangler` option must be a function reference" msgstr "Option `key_mangler` muss eine Funktionsreferenz sein" msgid "any options" msgstr "beliebige Optionen" msgid "auth_type is not Negotiate" msgstr "auth_type ist nicht 'Negotiate'" msgid "authorizing user does not have role required" msgstr "Der autorisierte Benutzer verfügt nicht über die erforderliche Rolle" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "kann kein Projekt in einer Niederlassung erstellen, die ein inaktiviertes " "Projekt enthält: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "Ein aktiviertes Projekt, das als Domäne agiert, kann nicht gelöscht werden. " "Inaktivieren Sie zuerst das Projekt %s." #, python-format msgid "group %(group)s" msgstr "Gruppe %(group)s" msgid "" "idp_contact_type must be one of: [technical, other, support, administrative " "or billing." msgstr "" "idp_contact_type muss einer der folgenden Werte sein: technical, other, " "support, administrative oder billing." #, python-format msgid "invalid date format %s" msgstr "ungültiges Datumsformat %s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "Es ist nicht zulässig, zwei Projekte zu haben, die als Domänen mit demselben " "Namen agieren: %s" #, python-format msgid "" "it is not permitted to have two projects within a domain with the same " "name : %s" msgstr "" "Es ist nicht zulässig, zwei Projekte mit demselben Namen innerhalb einer " "Domäne zu haben: %s" msgid "only root projects are allowed to act as domains." msgstr "Nur Rootprojekte dürfen als Domänen agieren." 
#, python-format msgid "option %(option)s in group %(group)s" msgstr "Option %(option)s in Gruppe %(group)s" msgid "provided consumer key does not match stored consumer key" msgstr "" "bereitgestellter Konsumentenschlüssel stimmt nicht mit dem gespeicherten " "Konsumentenschlüssel überein" msgid "provided request key does not match stored request key" msgstr "" "bereitgestellter Anforderungsschlüssel stimmt nicht mit dem gespeicherten " "Anforderungsschlüssel überein" msgid "provided verifier does not match stored verifier" msgstr "" "bereitgestellte Prüffunktion stimmt nicht mit gespeicherter Prüffunktion " "überein" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses muss eine positive Ganzzahl oder null sein." msgid "remaining_uses must not be set if redelegation is allowed" msgstr "" "remaining_uses darf nicht festgelegt werden, wenn eine Redelegation zulässig " "ist" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "Anforderung zur Aktualisierung von Gruppe %(group)s, die angegebene " "Konfiguration enthält jedoch stattdessen Gruppe %(group_other)s" msgid "rescope a scoped token" msgstr "Bereich für bereichsorientierten Token ändern" #, python-format msgid "role %s is not defined" msgstr "Die Rolle %s ist nicht definiert." msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "scope.project.id muss angegeben werden, wenn include_subtree angegeben wurde." #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s nicht gefunden oder ist kein Verzeichnis" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s wurde nicht gefunden oder ist keine Datei" #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "Tokenreferenz muss vom Typ 'KeystoneToken' sein. 
Abgerufen wurde: %s" msgid "" "update of domain_id is deprecated as of Mitaka and will be removed in O." msgstr "" "Die Aktualisierung von 'domain_id' wurde in Mitaka eingestellt und wird im " "\"O\"-Release entfernt. " #, python-format msgid "" "validated expected to find %(param_name)r in function signature for " "%(func_name)r." msgstr "" "Validierung erwartete %(param_name)r in Funktionssignatur für %(func_name)r." keystone-9.0.0/keystone/locale/keystone-log-info.pot0000664000567000056710000001273312701407105023722 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2016 OpenStack Foundation # This file is distributed under the same license as the keystone project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev1\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-18 06:34+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: keystone/assignment/core.py:200 #, python-format msgid "Creating the default role %s because it does not exist." msgstr "" #: keystone/assignment/core.py:208 #, python-format msgid "Creating the default role %s failed because it was already created" msgstr "" #: keystone/auth/controllers.py:112 #, python-format msgid "" "\"expires_at\" has conflicting values %(existing)s and %(new)s. Will use" " the earliest value." msgstr "" #: keystone/cmd/cli.py:188 #, python-format msgid "Created domain %s" msgstr "" #: keystone/cmd/cli.py:191 #, python-format msgid "Domain %s already exists, skipping creation." msgstr "" #: keystone/cmd/cli.py:204 #, python-format msgid "Created project %s" msgstr "" #: keystone/cmd/cli.py:206 #, python-format msgid "Project %s already exists, skipping creation." 
msgstr "" #: keystone/cmd/cli.py:216 #, python-format msgid "User %s already exists, skipping creation." msgstr "" #: keystone/cmd/cli.py:226 #, python-format msgid "Created user %s" msgstr "" #: keystone/cmd/cli.py:235 #, python-format msgid "Created Role %s" msgstr "" #: keystone/cmd/cli.py:237 #, python-format msgid "Role %s exists, skipping creation." msgstr "" #: keystone/cmd/cli.py:254 #, python-format msgid "Granted %(role)s on %(project)s to user %(username)s." msgstr "" #: keystone/cmd/cli.py:260 #, python-format msgid "User %(username)s already has %(role)s on %(project)s." msgstr "" #: keystone/cmd/cli.py:271 #, python-format msgid "Created Region %s" msgstr "" #: keystone/cmd/cli.py:273 #, python-format msgid "Region %s exists, skipping creation." msgstr "" #: keystone/cmd/cli.py:330 #, python-format msgid "Created %(interface)s endpoint %(url)s" msgstr "" #: keystone/cmd/cli.py:335 #, python-format msgid "Skipping %s endpoint as already created" msgstr "" #: keystone/cmd/cli.py:639 #, python-format msgid "Scanning %r for domain config files" msgstr "" #: keystone/common/openssl.py:80 #, python-format msgid "Running command - %s" msgstr "" #: keystone/common/wsgi.py:80 msgid "No bind information present in token" msgstr "" #: keystone/common/wsgi.py:87 #, python-format msgid "Named bind mode %s not in bind information" msgstr "" #: keystone/common/wsgi.py:94 msgid "Kerberos credentials required and not present" msgstr "" #: keystone/common/wsgi.py:98 msgid "Kerberos credentials do not match those in bind" msgstr "" #: keystone/common/wsgi.py:102 msgid "Kerberos bind authentication successful" msgstr "" #: keystone/common/wsgi.py:109 #, python-format msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}" msgstr "" #: keystone/common/environment/eventlet_server.py:116 #, python-format msgid "Starting %(arg0)s on %(host)s:%(port)s" msgstr "" #: keystone/common/kvs/core.py:159 #, python-format msgid "Adding proxy '%(proxy)s' to KVS %(name)s." 
msgstr "" #: keystone/common/kvs/core.py:209 #, python-format msgid "Using %(func)s as KVS region %(name)s key_mangler" msgstr "" #: keystone/common/kvs/core.py:221 #, python-format msgid "" "Using default keystone.common.kvs.sha1_mangle_key as KVS region %s " "key_mangler" msgstr "" #: keystone/common/kvs/core.py:231 #, python-format msgid "KVS region %s key_mangler disabled." msgstr "" #: keystone/middleware/auth.py:172 #, python-format msgid "Cannot find client issuer in env by the issuer attribute - %s." msgstr "" #: keystone/middleware/auth.py:180 #, python-format msgid "" "The client issuer %(client_issuer)s does not match with the trusted " "issuer %(trusted_issuer)s" msgstr "" #: keystone/token/persistence/backends/sql.py:286 #, python-format msgid "Total expired tokens removed: %d" msgstr "" #: keystone/token/providers/fernet/token_formatters.py:174 #, python-format msgid "" "Fernet token created with length of %d characters, which exceeds 255 " "characters" msgstr "" #: keystone/token/providers/fernet/utils.py:76 msgid "" "[fernet_tokens] key_repository does not appear to exist; attempting to " "create it" msgstr "" #: keystone/token/providers/fernet/utils.py:134 #, python-format msgid "Created a new key: %s" msgstr "" #: keystone/token/providers/fernet/utils.py:147 msgid "Key repository is already initialized; aborting." 
msgstr "" #: keystone/token/providers/fernet/utils.py:188 #, python-format msgid "Starting key rotation with %(count)s key files: %(list)s" msgstr "" #: keystone/token/providers/fernet/utils.py:194 #, python-format msgid "Current primary key is: %s" msgstr "" #: keystone/token/providers/fernet/utils.py:196 #, python-format msgid "Next primary key will be: %s" msgstr "" #: keystone/token/providers/fernet/utils.py:206 #, python-format msgid "Promoted key 0 to be the primary: %s" msgstr "" #: keystone/token/providers/fernet/utils.py:227 #, python-format msgid "Excess key to purge: %s" msgstr "" #: keystone/token/providers/fernet/utils.py:262 #, python-format msgid "Loaded %(count)d encryption keys (max_active_keys=%(max)d) from: %(dir)s" msgstr "" keystone-9.0.0/keystone/locale/keystone-log-error.pot0000664000567000056710000001134412701407102024112 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2016 OpenStack Foundation # This file is distributed under the same license as the keystone project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b4.dev37\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-08 06:03+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: keystone/notifications.py:370 msgid "Failed to construct notifier" msgstr "" #: keystone/notifications.py:466 #, python-format msgid "Failed to send %(res_id)s %(event_type)s notification" msgstr "" #: keystone/notifications.py:733 #, python-format msgid "Failed to send %(action)s %(event_type)s notification" msgstr "" #: keystone/assignment/core.py:669 #, python-format msgid "Circular reference found role inference rules - %(prior_role_id)s." 
msgstr "" #: keystone/catalog/core.py:75 #, python-format msgid "Malformed endpoint - %(url)r is not a string" msgstr "" #: keystone/catalog/core.py:80 #, python-format msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s" msgstr "" #: keystone/catalog/core.py:88 #, python-format msgid "" "Malformed endpoint '%(url)s'. The following type error occurred during " "string substitution: %(typeerror)s" msgstr "" #: keystone/catalog/core.py:94 #, python-format msgid "" "Malformed endpoint %s - incomplete format (are you missing a type " "notifier ?)" msgstr "" #: keystone/common/openssl.py:90 #, python-format msgid "Command %(to_exec)s exited with %(retcode)s - %(output)s" msgstr "" #: keystone/common/openssl.py:114 #, python-format msgid "Failed to remove file %(file_path)r: %(error)s" msgstr "" #: keystone/common/utils.py:267 msgid "" "Error setting up the debug environment. Verify that the option --debug-" "url has the format : and that a debugger processes is " "listening on that port." msgstr "" #: keystone/common/environment/eventlet_server.py:112 #, python-format msgid "Could not bind to %(host)s:%(port)s" msgstr "" #: keystone/common/environment/eventlet_server.py:211 msgid "Server error" msgstr "" #: keystone/endpoint_policy/core.py:131 keystone/endpoint_policy/core.py:231 #, python-format msgid "" "Circular reference or a repeated entry found in region tree - " "%(region_id)s." msgstr "" #: keystone/federation/idp.py:440 #, python-format msgid "Error when signing assertion, reason: %(reason)s%(output)s" msgstr "" #: keystone/oauth1/core.py:135 msgid "Cannot retrieve Authorization headers" msgstr "" #: keystone/resource/core.py:728 #, python-format msgid "" "Asked to convert a non-domain project into a domain - Domain: " "%(domain_id)s, Project ID: %(id)s, Project Name: %(project_name)s" msgstr "" #: keystone/resource/core.py:831 #, python-format msgid "" "Circular reference or a repeated entry found projects hierarchy - " "%(project_id)s." 
msgstr "" #: keystone/resource/core.py:912 msgid "Failed to create the default domain." msgstr "" #: keystone/resource/core.py:1419 keystone/resource/V8_backends/sql.py:100 #: keystone/resource/V8_backends/sql.py:119 #: keystone/resource/backends/sql.py:137 keystone/resource/backends/sql.py:156 #, python-format msgid "" "Circular reference or a repeated entry found in projects hierarchy - " "%(project_id)s." msgstr "" #: keystone/resource/core.py:1600 #, python-format msgid "" "Unexpected results in response for domain config - %(count)s responses, " "first option is %(option)s, expected option %(expected)s" msgstr "" #: keystone/token/provider.py:334 #, python-format msgid "Unexpected error or malformed token determining token expiry: %s" msgstr "" #: keystone/token/persistence/backends/kvs.py:236 #, python-format msgid "" "Reinitializing revocation list due to error in loading revocation list " "from backend. Expected `list` type got `%(type)s`. Old revocation list " "data: %(list)r" msgstr "" #: keystone/token/providers/common.py:726 msgid "Failed to validate token" msgstr "" #: keystone/token/providers/pki.py:52 msgid "Unable to sign token" msgstr "" #: keystone/token/providers/fernet/utils.py:42 #, python-format msgid "" "Either [fernet_tokens] key_repository does not exist or Keystone does not" " have sufficient permission to access it: %s" msgstr "" #: keystone/token/providers/fernet/utils.py:66 #, python-format msgid "Unable to convert Keystone user or group ID. 
Error: %s" msgstr "" #: keystone/token/providers/fernet/utils.py:83 msgid "" "Failed to create [fernet_tokens] key_repository: either it already exists" " or you don't have sufficient permissions to create it" msgstr "" keystone-9.0.0/keystone/locale/fr/0000775000567000056710000000000012701407246020234 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/fr/LC_MESSAGES/0000775000567000056710000000000012701407246022021 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000154412701407102026564 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: French\n" #, python-format msgid "Unable to open template file %s" msgstr "Impossible d'ouvrir le fichier modèle %s" keystone-9.0.0/keystone/locale/fr/LC_MESSAGES/keystone.po0000664000567000056710000016144412701407105024226 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Fries , 2014 # Maxime COQUEREL , 2014 # Andrew Melim , 2014 # Olivier Perrin , 2013 # Olivier Perrin , 2013 # Rémi Le Trocquer , 2014 # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Martine Marin , 2016. 
#zanata # Tom Cocozzello , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev10\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-28 22:17+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-28 05:50+0000\n" "Last-Translator: Martine Marin \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: French\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "%(driver)s is not supported driver version" msgstr "%(driver)s n'est pas une version de pilote prise en charge" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "" "Le nom %(entity)s ne peut pas contenir les caractères réservés suivants : " "%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s n'est pas un événement de notification valide, doit être l'une des " "options suivantes : %(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s n'est pas un hôte de tableau de bord digne de confiance" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s ne permet pas les migrations de base de données. Le chemin du " "référentiel de migration %(path)s n'existe pas ou n'est pas un répertoire." #, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s" msgstr "%(prior_role_id)s n'implique pas %(implied_role_id)s" #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." 
msgstr "" "%(property_name)s ne peut pas contenir moins de %(min_length)s caractères." #, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "%(property_name)s n'est pas du type %(display_expected_type)s" #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." msgstr "" "%(property_name)s ne doit pas contenir plus de %(max_length)s caractères." #, python-format msgid "%(role_id)s cannot be an implied roles" msgstr "%(role_id)s ne peut pas être un rôle impliqué" #, python-format msgid "%s cannot be empty." msgstr "%s ne peut pas être vide." #, python-format msgid "%s extension does not exist." msgstr "L'extension %s n'existe pas." #, python-format msgid "%s field is required and cannot be empty" msgstr "La zone %s est obligatoire et ne peut pas être vide" #, python-format msgid "%s field(s) cannot be empty" msgstr "%s zone(s) ne peut(peuvent) pas être vide(s)" #, python-format msgid "" "%s for the LDAP identity backend has been deprecated in the Mitaka release " "in favor of read-only identity LDAP access. It will be removed in the \"O\" " "release." msgstr "" "%s pour le back-end d'identité LDAP est désormais obsolète dans l'édition " "Mitaka en faveur de l'accès LDAP d'identité en lecture seule. Sera supprimé " "dans l'édition \"O\"." msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(Désactivez le mode insecure_debug pour supprimer ces détails.)" msgid "--all option cannot be mixed with other options" msgstr "L'option -all ne peut pas être associée à d'autres options" msgid "A project-scoped token is required to produce a service catalog." msgstr "Un jeton de projet est requis pour produire un catalogue de service." msgid "Access token is expired" msgstr "Token d'accès est expiré" msgid "Access token not found" msgstr "Token d'accès non trouvé" msgid "Additional authentications steps required." msgstr "Authentifications étapes supplémentaires sont nécessaires ." 
msgid "An unexpected error occurred when retrieving domain configs" msgstr "" "Une erreur inattendue est survenue lors de l'extraction des configurations " "de domaine" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "" "Une erreur inattendue est survenue lors de la tentative de stockage de %s" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "Une erreur inattendue a empêché le serveur de traiter votre requête." #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "" "Une erreur inattendue a empêché le serveur de traiter votre requête: " "%(exception)s" msgid "An unhandled exception has occurred: Could not find metadata." msgstr "Une exception non gérée s'est produite : métadonnées introuvables." msgid "At least one option must be provided" msgstr "Au moins une option doit être fournie" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "Au moins une option doit être indiquée. Utilisez --all ou --domain-name" msgid "At least one role should be specified." msgstr "Au moins un rôle doit être indiqué." #, python-format msgid "" "Attempted automatic driver selection for assignment based upon " "[identity]\\driver option failed since driver %s is not found. Set " "[assignment]/driver to a valid driver in keystone config." msgstr "" "La tentative de sélection du pilote automatique pour l'affectation basée sur " "l'option [identity]\\driver a échoué car le pilote %s est introuvable. " "Définissez l'option [assignment]/driver avec un pilote valide dans la " "configuration keystone." msgid "Attempted to authenticate with an unsupported method." msgstr "Tentative d'authentification avec une méthode non prise en charge ." 
msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "" "Tentative d'utilisation du jeton OS-FEDERATION avec V2 Identity Service, " "utilisez l'authentification V3" msgid "Authentication plugin error." msgstr "Erreur d'authentification du plug-in." #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "" "Le back-end `%(backend)s` n'est pas un back-end memcached valide. Back-ends " "valides : %(backend_list)s" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "" "Impossible d'autoriser un jeton de requête avec un jeton émis par " "l'intermédiaire de la délégation." #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "Impossible de modifier %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "Impossible de modifier l'ID du domaine" msgid "Cannot change user ID" msgstr "Impossible de modifier l'id de l'utilisateur" msgid "Cannot change user name" msgstr "Impossible de changer le nom d'utilisateur" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "Impossible de créer un noeud final avec une URL non valide : %(url)s" #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "Impossible de créer le projet %(project_id)s avec le parent" #, python-format msgid "" "Cannot create project, since it specifies its owner as domain %(domain_id)s, " "but specifies a parent in a different domain (%(parent_domain_id)s)." msgstr "" "Impossible de créer le projet, car il indique son propriétaire comme domaine " "%(domain_id)s, mais spécifie un parent figurant dans un autre domaine " "(%(parent_domain_id)s)." #, python-format msgid "" "Cannot create project, since its parent (%(domain_id)s) is acting as a " "domain, but project's specified parent_id (%(parent_id)s) does not match " "this domain_id." 
msgstr "" "Impossible de créer le projet, car son parent (%(domain_id)s) fait office de " "domaine, mais l'ID parent (%(parent_id)s) du projet spécifié ne correspond " "pas à cet ID de domaine (domain_id)." msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "Impossible de supprimer un domaine activé, veuillez d'abord le désactiver." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Impossible de supprimer le projet %(project_id)s car son sous-arbre contient " "des projets activés." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "Impossible de supprimer le projet %s car il ne s'agit pas d'une feuille dans " "la hiérarchie. Utilisez l'option cascade si vous voulez supprimer un sous-" "arbre complet." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Impossible de désactiver le projet %(project_id)s car son sous-arbre " "contient des projets activés." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "Impossible d'activer le projet %s car ses parents sont désactivés" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "Impossible de répertorier les affectations en provenance de groupes et " "filtrées par ID utilisateur." msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "Impossible de répertorier des jetons de requête avec un jeton émis par " "l'intermédiaire de la délégation." #, python-format msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s" msgstr "Impossible d'ouvrir le certificat %(cert_file)s. 
Raison: %(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Impossible de retirer le rôle qui n'est pas accordé, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "Impossible de tronquer un appel de pilote sans avoir hints list comme " "premier paramètre après self " msgid "Cannot update domain_id of a project that has children." msgstr "" "Impossible de mettre à jour l'ID de domaine (domain_id) d'un projet " "comportant des enfants." msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "Impossible d'utiliser les paramètres d'interrogation parents_as_list et " "parents_as_ids en même temps." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "Impossible d'utiliser les paramètres d'interrogation subtree_as_list et " "subtree_as_ids en même temps." msgid "Cascade update is only allowed for enabled attribute." msgstr "La mise à jour en cascade n'est autorisée que pour l'attribut activé." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "Le fait de combiner un filtre effectif et un filtre de groupes donnera " "toujours une liste vide." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "Le fait de combiner des filtres effectifs, de domaine et hérités donnera " "toujours une liste vide." 
#, python-format msgid "Config API entity at /domains/%s/config" msgstr "Entité Config API à /domains/%s/config" #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "" "Un conflit s'est produit lors de la tentative de stockage de %(type)s - " "%(details)s" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "ID de région contradictoires indiqués : \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "Client non trouvé" #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target %(target)s" msgstr "" "Impossible de modifier le(s) attribut(s) non modifiable(s) '%(attributes)s' " "dans la cible %(target)s" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "Impossible de déterminer l'ID du fournisseur d'identité. L'option de " "configuration %(issuer_attribute)s est introuvable dans l'environnement de " "demande." #, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "" "%(group_or_option)s introuvable dans la configuration de domaine pour le " "domaine %(domain_id)s" #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "Groupe de points finals introuvable : %(endpoint_group_id)s" msgid "Could not find Identity Provider identifier in environment" msgstr "" "L'identificateur de fournisseur d'identité est introuvable dans " "l'environnement." 
#, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "Impossible de trouver l'identité du Provider: %(idp_id)s" #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "Le fournisseur de services %(sp_id)s est introuvable" #, python-format msgid "Could not find credential: %(credential_id)s" msgstr "Impossible de trouver les paramètres du compte: %(credential_id)s" #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "Impossible de trouver le domaine: %(domain_id)s" #, python-format msgid "Could not find endpoint: %(endpoint_id)s" msgstr "Noeud final %(endpoint_id)s introuvable." #, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "" "Protocole fédéré %(protocol_id)s introuvable pour le fournisseur " "d'identité : %(idp_id)s" #, python-format msgid "Could not find group: %(group_id)s" msgstr "Impossible de trouver le groupe: %(group_id)s" #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "Mappage %(mapping_id)s introuvable." msgid "Could not find policy association" msgstr "Association de règle introuvable." #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "Règle %(policy_id)s introuvable." 
#, python-format msgid "Could not find project: %(project_id)s" msgstr "Impossible de trouver le projet: %(project_id)s" #, python-format msgid "Could not find region: %(region_id)s" msgstr "Impossible de trouver la région: %(region_id)s" #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" "Affectation de rôle avec le rôle : %(role_id)s, l'utilisateur ou le groupe : " "%(actor_id)s, le projet ou le domaine : %(target_id)s introuvable" #, python-format msgid "Could not find role: %(role_id)s" msgstr "Impossible de trouver le rôle: %(role_id)s" #, python-format msgid "Could not find service: %(service_id)s" msgstr "Impossible de trouver le service: %(service_id)s" #, python-format msgid "Could not find token: %(token_id)s" msgstr "Impossible de trouver le token: %(token_id)s" #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "Confiance %(trust_id)s introuvable." #, python-format msgid "Could not find user: %(user_id)s" msgstr "Impossible de trouver l'utilisateur: %(user_id)s" #, python-format msgid "Could not find version: %(version)s" msgstr "Impossible de trouver la version: %(version)s" #, python-format msgid "Could not find: %(target)s" msgstr "N'est pas trouvé: %(target)s" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "Impossible de mapper des propriétés d'utilisateur fédéré avec des valeurs " "d'identité. Pour plus d'informations, consultez les journaux de débogage ou " "le mappage utilisé." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "Impossible de mapper l'utilisateur lors de la définition de l'identité " "utilisateur éphémère. 
des règles de mappage doivent spécifier ID utilisateur/" "nom ou la variable d'environnement REMOTE_USER doit être définie." msgid "Could not validate the access token" msgstr "Impossible de valider le jeton d'accès" msgid "Credential belongs to another user" msgstr "Les données d'identification appartiennent à un autre utilisateur" msgid "Credential signature mismatch" msgstr "Signature des données d'identification non concordante" #, python-format msgid "" "Direct import of auth plugin %(name)r is deprecated as of Liberty in favor " "of its entrypoint from %(namespace)r and may be removed in N." msgstr "" "L'importation directe du plug-in d'authentification %(name)r est obsolète " "depuis Liberty en faveur de son point d'entrée depuis %(namespace)r et " "susceptible d'être supprimée dans N." #, python-format msgid "" "Direct import of driver %(name)r is deprecated as of Liberty in favor of its " "entrypoint from %(namespace)r and may be removed in N." msgstr "" "L'importation directe du pilote %(name)r est obsolète depuis Liberty en " "faveur de son point d'entrée depuis %(namespace)r et susceptible d'être " "supprimée dans N." msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "Désactivation d'une entité dont l'attribut 'enable' est ignoré par la " "configuration." #, python-format msgid "Domain (%s)" msgstr "Domaine (%s)" #, python-format msgid "Domain cannot be named %s" msgstr "Le domaine ne peut pas s'appeler %s" #, python-format msgid "Domain cannot have ID %s" msgstr "Le domaine ne peut pas posséder l'ID %s" #, python-format msgid "Domain is disabled: %s" msgstr "Domaine désactivé : %s" msgid "Domain name cannot contain reserved characters." msgstr "Le nom du domaine ne peut pas contenir des caractères réservés." 
msgid "Domain scoped token is not supported" msgstr "Le jeton de périmètre du domaine n'est pas pris en charge" msgid "Domain specific roles are not supported in the V8 role driver" msgstr "" "Les rôles spécifiques au domaine ne sont pas pris en charge dans le pilote " "de rôle V8 " #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "Le domaine : %(domain)s possède déjà une configuration définie - ce fichier " "sera ignoré : %(file)s." msgid "Duplicate Entry" msgstr "Entrée en double" #, python-format msgid "Duplicate ID, %s." msgstr "ID en double, %s." #, python-format msgid "Duplicate entry: %s" msgstr "Entrée en double : %s" #, python-format msgid "Duplicate name, %s." msgstr "Nom en double, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "ID distant en double : %s" msgid "EC2 access key not found." msgstr "Clé d'accès EC2 non trouvée." msgid "EC2 signature not supplied." msgstr "Signature EC2 non fournie." msgid "" "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set." msgstr "" "L'argument --bootstrap-password ou OS_BOOTSTRAP_PASSWORD doit être défini." msgid "Enabled field must be a boolean" msgstr "La zone activée doit être un booléen" msgid "Enabled field should be a boolean" msgstr "La zone activée devrait être un booléen" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Noeud final %(endpoint_id)s introuvable dans le projet %(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "Association de projets du groupe de points finals introuvable" msgid "Ensure configuration option idp_entity_id is set." msgstr "Assurez-vous que l'option de configuration idp_entity_id est définie." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "" "Assurez-vous que l'option de configuration idp_sso_endpoint est définie." 
#, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Erreur lors de l'analyse syntaxique du fichier de configuration pour le " "domaine : %(domain)s, fichier : %(file)s." #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Erreur lors de l'ouverture du fichier %(path)s : %(err)s" #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "Erreur lors de l'analyse de la ligne : '%(line)s' : %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Erreur lors de l'analyse des règles %(path)s : %(err)s" #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "Erreur lors de la lecture des métadonnées du fichier, %(reason)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Nombre de tentatives d'enregistrement du domaine %(domain)s dépassé pour " "utiliser le pilote SQL, le dernier domaine qui semble l'avoir eu est " "%(last_domain)s, abandon..." #, python-format msgid "Expected dict or list: %s" msgstr "Type dictionnaire ou liste attendu: %s" msgid "" "Expected signing certificates are not available on the server. Please check " "Keystone configuration." msgstr "" "Les certificats signataires attendus sont indisponibles sur le serveur. " "Veuillez vérifier la configuration de Keystone." #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "%(attribute)s recherché dans %(target)s - le serveur n'a pas pu se conformer " "à la requête puisqu'elle est mal formée ou incorrecte. Par défaut, le client " "est en erreur." 
#, python-format msgid "Failed to start the %(name)s server" msgstr "Impossible de démarrer le serveur %(name)s" msgid "Failed to validate token" msgstr "Echec de validation du token" msgid "Federation token is expired" msgstr "Le jeton Federation a expiré" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "La zone \"remaining_uses\" est définie sur %(value)s alors qu'elle ne doit " "pas être définie pour redéléguer une fiducie" msgid "Found invalid token: scoped to both project and domain." msgstr "Jeton non valide trouvé : portée de projet et de domaine." #, python-format msgid "Group %s not found in config" msgstr "Groupe %s introuvable dans la configuration" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "Le groupe %(group)s n'est pas pris en charge pour les configurations " "spécifiques au domaine" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "Groupe %(group_id)s renvoyé par le mappage %(mapping_id)s introuvable dans " "le back-end." #, python-format msgid "" "Group membership across backend boundaries is not allowed, group in question " "is %(group_id)s, user is %(user_id)s" msgstr "" "Appartenance au groupe entre frontières dorsales interdite, le groupe en " "question est %(group_id)s, l'utilisateur est %(user_id)s" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "L'attribut ID %(id_attr)s est introuvable dans l'objet LDAP %(dn)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "Le fournisseur d'identité %(idp)s est désactivé" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" "L'identificateur entrant du fournisseur d'identité ne fait pas partie des " "identificateurs acceptés." msgid "Invalid EC2 signature." 
msgstr "Signature EC2 non valide." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "Option de certificat TLS LDAP non valide : %(option)s. Choisissez l'une des " "options : %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Mauvaise option LDAP TLS_AVAIL: %s. TLS n'est pas disponible" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "Option déréférencée LDAP non valide : %(option)s. Choisir l'une des options " "suivantes : %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "Portée LDAP invalide: %(scope)s. Choisissez parmi: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Combinaison TLS / LDAPS invalide" #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "Type de données d'information d'audit non valide : %(data)s (%(type)s)" msgid "Invalid blob in credential" msgstr "Blob non valide dans les informations d'identification" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Nom de domaine non valide : %(domain)s trouvé dans le nom du fichier de " "configuration : %(file)s - ce fichier sera ignoré." #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "Configuration spécifique au domaine non valide : %(reason)s" #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "" "Valeur d'entrée incorrecte pour la zone '%(path)s'. La valeur est " "'%(value)s'." msgid "Invalid limit value" msgstr "Limite de valeur non valide" #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service or " "Region+Service allowed. 
Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s" msgstr "" "Combinaison non valide d'entités pour l'association de règle. Seules les " "entités Point final, Service ou Région+Service sont autorisées. La demande " "était Point final : %(endpoint_id)s, Service : %(service_id)s, Région : " "%(region_id)s" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Règle non valide : %(identity_value)s. Les mots clés 'groups' et 'domain' " "doivent être spécifiés." msgid "Invalid signature" msgstr "Signature non valide" msgid "Invalid user / password" msgstr "Login / Mot de passe non valide" msgid "Invalid username or TOTP passcode" msgstr "Nom d'utilisateur ou code d'authentification TOTP non valide" msgid "Invalid username or password" msgstr "Nom d'utilisateur ou mot de passe invalide" #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." msgstr "KVS region %s est déjà configuré. Ne peut pas être reconfiguré." #, python-format msgid "Key Value Store not configured: %s" msgstr "La valeur de la clé du magasin n'est pas configurée : %s" #, python-format msgid "LDAP %s create" msgstr "Création LDAP %s" #, python-format msgid "LDAP %s delete" msgstr "Suppression LDAP %s" #, python-format msgid "LDAP %s update" msgstr "Mise à jour LDAP %s" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "Longueur de l'ID de ressource transformable > 64 (nombre maximal de " "caractères autorisé)" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "La section locale dans le mappage %(mapping_id)s fait référence à une " "correspondance à distance qui n'existe pas (par ex. {0} dans une section " "locale)." 
#, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "Le délai de verrouillage s'est produit pour la clé, %(target)s" #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "" "La clé de verrouillage doit correspondre à la clé cible : %(lock)s != " "%(target)s" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "Un caractère est mal formé dans URL (%(endpoint)s), regarder le log d'erreur " "pour plus de détails." msgid "Marker could not be found" msgstr "Le marqueur ne peut pas être trouvé" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "La profondeur maximale de hiérarchie est atteinte pour la branche %s." #, python-format msgid "Maximum lock attempts on %s occurred." msgstr "Le nombre maximal de tentatives de verrouillage sur %s est atteint." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "Le membre %(member)s est déjà membre du groupe %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Impossible d'appeler la méthode %s" msgid "Missing entity ID from environment" msgstr "IP d'entité manquant de l'environnement" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "La modification de \"redelegation_count\" lors de la redélégation est " "interdite. Il est conseillé d'omettre ce paramètre." msgid "Multiple domains are not supported" msgstr "Les domaines multiples ne sont pas pris en charge" msgid "Must be called within an active lock context." msgstr "Doit être appelé dans un contexte de verrou actif." msgid "Must specify either domain or project" msgstr "Indiquer obligatoirement un domaine ou un projet" msgid "Name field is required and cannot be empty" msgstr "La zone de nom est requise et ne peut pas être vide" msgid "Neither Project Domain ID nor Project Domain Name was provided." 
msgstr "Aucun ID ou nom de domaine de projet n'a été fourni." msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, if " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "Aucun en-tête d'autorisation trouvé, impossible de procéder aux appels liés " "à OAuth, en cas d'exécution sous HTTPd ou Apache, vérifiez que " "WSGIPassAuthorization est défini sur Activé." msgid "No authenticated user" msgstr "Aucun utilisateur authentifié" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "Aucune clé de chiffrement trouvée ; exécutez keystone-manage fernet_setup " "pour en amorcer une." msgid "No options specified" msgstr "Aucune option spécifiée" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "Aucune règle n'est associée au point final %(endpoint_id)s." #, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "Aucune utilisation restante pour la confiance : %(trust_id)s" msgid "No token in the request" msgstr "Aucun jeton dans la demande" msgid "Non-default domain is not supported" msgstr "Le domaine non par défaut n'est pas pris en charge" msgid "One of the trust agents is disabled or deleted" msgstr "L'un des agents de confiance est désactivé ou supprimé" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "Option %(option)s trouvée avec aucun groupe spécifié lors de la vérification " "de la demande de configuration du domaine" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "L'option %(option)s dans le groupe %(group)s n'est pas prise en charge pour " "les configurations spécifiques au domaine" #, python-format msgid "Project (%s)" msgstr "Projet (%s)" #, python-format msgid "Project ID not found: %(t_id)s" msgstr "ID de projet introuvable : 
%(t_id)s" msgid "Project field is required and cannot be empty." msgstr "La zone Projet est requise et ne doit pas être vide." #, python-format msgid "Project is disabled: %s" msgstr "Projet désactivé : %s" msgid "Project name cannot contain reserved characters." msgstr "Le nom du projet ne peut pas contenir des caractères réservés." msgid "Query string is not UTF-8 encoded" msgstr "La chaîne de requête n'est pas au format UTF-8. " #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "La lecture de la valeur par défaut pour l'option %(option)s dans le groupe " "%(group)s n'est pas prise en charge" msgid "Redelegation allowed for delegated by trust only" msgstr "Redélégation autorisée pour une délégation par fiducie uniquement" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "Profondeur de redélégation restante %(redelegation_depth)d par rapport à la " "plage admise [0..%(max_count)d]" msgid "" "Remove admin_crud_extension from the paste pipeline, the admin_crud " "extension is now always available. Updatethe [pipeline:admin_api] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "Supprimez admin_crud_extension du pipeline de collage, l'extension " "admin_crud est désormais toujours disponible. Mettez à jour la section " "[pipeline:admin_api] dans le fichier keystone-paste.ini en conséquence, car " "elle sera supprimée dans l'édition O." msgid "" "Remove endpoint_filter_extension from the paste pipeline, the endpoint " "filter extension is now always available. Update the [pipeline:api_v3] " "section in keystone-paste.ini accordingly as it will be removed in the O " "release." msgstr "" "Supprimez endpoint_filter_extension du pipeline de collage, l'extension " "endpoint filter est désormais toujours disponible. 
Mettez à jour la section " "[pipeline:api_v3] dans le fichier keystone-paste.ini en conséquence car elle " "sera supprimée dans l'édition O." msgid "" "Remove federation_extension from the paste pipeline, the federation " "extension is now always available. Update the [pipeline:api_v3] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "Supprimez federation_extension du pipeline de collage, l'extension " "federation est désormais toujours disponible. Mettez à jour la section " "[pipeline:api_v3] dans le fichier keystone-paste.ini en conséquence, car " "elle sera supprimée dans l'édition O." msgid "" "Remove oauth1_extension from the paste pipeline, the oauth1 extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Supprimez oauth1_extension du pipeline de collage, l'extension oauth1 est " "désormais toujours disponible. Mettez à jour la section [pipeline:api_v3] " "dans le fichier keystone-paste.ini en conséquence, car elle sera supprimée " "dans l'édition O." msgid "" "Remove revoke_extension from the paste pipeline, the revoke extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "Supprimez revoke_extension du pipeline de collage, l'extension revoke est " "désormais toujours disponible. Mettez à jour la section [pipeline:api_v3] " "dans le fichier keystone-paste.ini en conséquence, car elle sera supprimée " "dans l'édition O." msgid "" "Remove simple_cert from the paste pipeline, the PKI and PKIz token providers " "are now deprecated and simple_cert was only used insupport of these token " "providers. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." 
msgstr "" "Supprimez simple_cert du pipeline de collage, les fournisseurs de jetons " "PKI et PKIz sont désormais obsolètes et simple_cert n'était utilisé que pour " "la prise en charge de ces fournisseurs. Mettez à jour la section [pipeline:" "api_v3] dans le fichier keystone-paste.ini en conséquence, car elle sera " "supprimée dans l'édition O." msgid "" "Remove user_crud_extension from the paste pipeline, the user_crud extension " "is now always available. Updatethe [pipeline:public_api] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." msgstr "" "Supprimez user_crud_extension du pipeline de collage, l'extension user_crud " "est désormais toujours disponible. Mettez à jour la section [pipeline:" "public_api] dans le fichier keystone-paste.ini en conséquence, car elle sera " "supprimée de l'édition O." msgid "Request Token does not have an authorizing user id" msgstr "Le jeton de la demande ne possède pas d'ID utilisateur d'autorisation" #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. The " "server could not comply with the request because the attribute size is " "invalid (too large). The client is assumed to be in error." msgstr "" "La valeur de l'attribut %(attribute)s de la demande doit être inférieure ou " "égale à %(size)i. Il se peut que le serveur ne soit pas conforme à la " "demande car la taille de l'attribut est incorrecte (excessive). Par défaut, " "le client est en erreur." 
msgid "Request must have an origin query parameter" msgstr "La demande doit avoir un paramètre de requête d'origine" msgid "Request token is expired" msgstr "Le jeton de la demande a expiré" msgid "Request token not found" msgstr "Token de requete non trouvé" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "Le délai d'expiration demandé dépasse celui que la fiducie redéléguée peut " "fournir" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "La profondeur de redélégation demandée %(requested_count)d est supérieure à " "la limite autorisée %(max_count)d" msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of running " "in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will " "be removed in the \"M\"-Release." msgstr "" "Exécution de keystone via eventlet est obsolète depuis Kilo et remplacée par " "l'exécution dans un serveur WSGI (par exemple, mod_wsgi). La prise en charge " "pour keystone sous l'eventlet sera supprimée dans \"M\"-Release." 
msgid "Scoping to both domain and project is not allowed" msgstr "La configuration du domaine et du projet n'est pas autorisée" msgid "Scoping to both domain and trust is not allowed" msgstr "" "La configuration du domaine et du certificat de confiance n'est pas autorisée" msgid "Scoping to both project and trust is not allowed" msgstr "" "La configuration du projet et du certificat de confiance n'est pas autorisée" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "Le fournisseur de services %(sp)s est désactivé" msgid "Some of requested roles are not in redelegated trust" msgstr "Certains rôles demandés ne font pas partie de la fiducie redéléguée" msgid "Specify a domain or project, not both" msgstr "Spécifier un domaine ou un projet, pas les deux" msgid "Specify a user or group, not both" msgstr "Spécifier un utilisateur ou groupe, pas les deux" msgid "Specify one of domain or project" msgstr "Indiquez un domaine ou un projet" msgid "Specify one of user or group" msgstr "Indiquez un utilisateur ou un groupe" #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "Longueur de chaîne dépassée. La longueur de la chaîne '%(string)s a dépassé " "la valeur maximale de colonne %(type)s(CHAR(%(length)d))." msgid "Tenant name cannot contain reserved characters." msgstr "Le nom du locataire ne peut pas contenir des caractères réservés." #, python-format msgid "" "The %s extension has been moved into keystone core and as such its " "migrations are maintained by the main keystone database control. Use the " "command: keystone-manage db_sync" msgstr "" "L'extension %s a été déplacée vers le service keystone de base et ses " "migrations sont donc gérées par le contrôle de la base de données keystone " "principale. Utilisez la commande : keystone-manage db_sync" msgid "" "The 'expires_at' must not be before now. 
The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "La valeur 'expires_at' ne doit pas être située dans le passé. Le serveur n'a " "pas pu exécuter la demande vu qu'elle est mal formée ou incorrecte. Le " "client est considéré comme étant à l'état d'erreur." msgid "The --all option cannot be used with the --domain-name option" msgstr "L'option --all ne peut pas être utilisée avec l'option --domain-name" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "" "Le fichier de configuration Keystone %(config_file)s ne peut pas être trouvé." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "La configuration spécifique au domaine keystone a spécifié plusieurs pilotes " "SQL (un seul est autorisé) : %(source)s." msgid "The action you have requested has not been implemented." msgstr "L'action que vous avez demandée n'a pas été implémentée." msgid "The authenticated user should match the trustor." msgstr "L'utilisateur authentifié doit correspondre au fiduciant." msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." msgstr "" "Les certificats que vous avez demandés sont indisponibles. Il est probable " "que ce serveur n'utilise pas les jetons PKI ; sinon, c'est le résultat d'un " "problème de configuration." msgid "The configured token provider does not support bind authentication." msgstr "" "Le fournisseur de jeton configuré ne prend pas en charge l'authentification " "par opération de liaison." msgid "The creation of projects acting as domains is not allowed in v2." msgstr "" "La création de projets faisant office de domaines n'est pas autorisée dans " "v2." 
#, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "La longueur du mot de passe doit être inférieure ou égale à %(size)i. n'est " "pas conforme à la demande car le mot de passe est incorrect." msgid "The request you have made requires authentication." msgstr "La demande que vous avez fait requiert une authentification." msgid "The resource could not be found." msgstr "La ressource est introuvable." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "L'appel de révocation ne doit pas contenir à la fois domain_id et " "project_id. Il s'agit d'un bogue dans le serveur Keystone. La demande en " "cours est abandonnée." msgid "The service you have requested is no longer available on this server." msgstr "Le service que vous avez demandé n'est plus disponible sur ce serveur." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "La région parent spécifiée %(parent_region_id)s risque de créer une " "hiérarchie de région circulaire." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "La valeur du groupe %(group)s spécifié dans la configuration doit être un " "dictionnaire d'options" msgid "There should not be any non-oauth parameters" msgstr "Aucun paramètre non-oauth ne doit être utilisé" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Il ne s'agit pas d'une version de contenu Fernet reconnue : %s" #, python-format msgid "This is not a recognized Fernet token %s" msgstr "Il ne s'agit pas d'un jeton Fernet reconnu %s" msgid "" "Timestamp not in expected format. 
The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "Horodatage n'est pas au format attendu. Le serveur n'a pas pu se conformer à " "la demande car elle est incorrectement formée ou incorrecte. Le client est " "considéré comme étant à l'état d'erreur." #, python-format msgid "" "To get a more detailed information on this error, re-run this command for " "the specific domain, i.e.: keystone-manage domain_config_upload --domain-" "name %s" msgstr "" "Pour obtenir des informations plus détaillées sur cette erreur, réexécutez " "cette commande pour le domaine spécifique, par exemple : keystone-manage " "domain_config_upload --domain-name %s" msgid "Token belongs to another user" msgstr "Le jeton appartient à un autre utilisateur" msgid "Token does not belong to specified tenant." msgstr "Le jeton n'appartient pas au titulaire spécifié." msgid "Token version is unrecognizable or unsupported." msgstr "Version de jeton non reconnue ou non prise en charge." msgid "Trustee has no delegated roles." msgstr "Le fiduciaire n'a aucun rôle délégué." msgid "Trustor is disabled." msgstr "Trustor est désactivé. 
" #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Tentative de mise à jour du groupe %(group)s, de sorte que le groupe soit " "spécifié dans la configuration uniquement" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Tentative de mise à jour de l'option %(option)s dans le groupe %(group)s, " "mais la configuration fournie contient l'option %(option_other)s à la place" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Tentative de mise à jour de l'option %(option)s dans le groupe %(group)s, de " "sorte que l'option soit spécifiée dans la configuration uniquement" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "Impossible d'accéder à la base de données keystone, vérifiez qu'elle est " "configurée correctement." #, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." msgstr "" "Impossible de consommer la confiance %(trust_id)s et d'acquérir un verrou." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Impossible de supprimer la région %(region_id)s car la région ou ses régions " "enfant ont des noeuds finals associés." 
msgid "Unable to downgrade schema" msgstr "Impossible de rétrograder le schéma" #, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "" "Impossible de trouver des groupes valides en utilisant le mappage " "%(mapping_id)s" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "Impossible de localiser le répertoire de configuration domaine: %s" #, python-format msgid "Unable to lookup user %s" msgstr "Impossible de rechercher l'utilisateur %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Impossible de rapprocher l'attribut d'identité %(attribute)s car il possède " "des valeurs en conflit : %(new)s et %(old)s" #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed, or this is the result of misconfiguration. Reason " "%(reason)s" msgstr "" "Impossible de signer l'assertion SAML. Il est probable que xmlsec1 ne soit " "pas installé sur ce serveur ; sinon, cela est dû à un problème de " "configuration. Raison : %(reason)s" msgid "Unable to sign token." msgstr "Impossible de signer le jeton" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Type inattendu d'affectation, %s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" "Combinaison inattendue d'attributs d'octroi - Utilisateur : %(user_id)s. " "Groupe : %(group_id)s. Projet : %(project_id)s. 
Domaine : %(domain_id)s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Statut inattendu demandé pour la réponse JSON Home, %s" msgid "Unknown Target" msgstr "Cible inconnue" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "Domaine inconnu '%(name)s' spécifié par --domain-name" #, python-format msgid "Unknown token version %s" msgstr "Version de token inconnue %s" #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "Dépendance désenregistrée : %(name)s pour %(targets)s" msgid "Update of `domain_id` is not allowed." msgstr "La mise à jour de `domain_id` n'est pas autorisée." msgid "Update of `is_domain` is not allowed." msgstr "La mise à jour de `is_domain` n'est pas autorisée." msgid "Update of `parent_id` is not allowed." msgstr "La mise à jour de `parent_id` est interdite." msgid "Update of domain_id is only allowed for root projects." msgstr "" "La mise à jour de l'ID de domaine (domain_id) est autorisée uniquement pour " "les projets racine." msgid "Update of domain_id of projects acting as domains is not allowed." msgstr "" "La mise à jour de l'ID de domaine (domain_id) des projets faisant office de " "domaines n'est pas autorisée." msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "" "Utilisez un jeton dont la portée est un projet lorsque vous essayez de créer " "une assertion SAML" msgid "" "Use of the identity driver config to automatically configure the same " "assignment driver has been deprecated, in the \"O\" release, the assignment " "driver will need to be expicitly configured if different than the default " "(SQL)." msgstr "" "L'utilisation de la configuration du pilote d'identité pour configurer " "automatiquement le même pilote d'affectation est désormais obsolète, dans " "l'édition \"O\", le pilote d'affectation doit être configuré explicitement " "s'il est différent de la valeur par défaut (SQL)." 
#, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "L'utilisateur %(u_id)s n'est pas autorisé pour le locataire %(t_id)s" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "L'utilisateur %(user_id)s n'a pas accès au domaine %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "L'utilisateur %(user_id)s n'a pas accès au projet %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "L'utilisateur %(user_id)s est déjà membre du groupe %(group_id)s" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "Utilisateur '%(user_id)s' non trouvé dans le groupe '%(group_id)s'" msgid "User IDs do not match" msgstr "Les ID utilisateur ne correspondent pas." msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "L'authentification utilisateur ne peut pas être créée en raison de l'absence " "d'un ID utilisateur, d'un nom d'utilisateur avec ID de domaine ou d'un nom " "utilisateur avec nom de domaine." #, python-format msgid "User is disabled: %s" msgstr "Utilisateur désactivé : %s" msgid "User is not a member of the requested project" msgstr "L'utilisateur n'est pas membre du projet demandé" msgid "User is not a trustee." msgstr "L'utilisateur n'est pas un fiduciaire." msgid "User not found" msgstr "Utilisateur introuvable" msgid "User not valid for tenant." msgstr "Utilisateur non valide pour le locataire." msgid "User roles not supported: tenant_id required" msgstr "Rôles utilisateur non pris en charge : tenant_id est requis" #, python-format msgid "User type %s not supported" msgstr "Type d'utilisateur %s non pris en charge" msgid "You are not authorized to perform the requested action." 
msgstr "Vous n'êtes pas autorisé à effectuer l'action demandée" #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "Vous n'êtes pas autorisé à effectuer l'action demandée: %(action)s" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Vous avez essayé de créer une ressource à l'aide du jeton admin. Comme ce " "jeton ne figure pas dans un domaine, vous devez inclure explicitement un " "domaine auquel cette ressource doit appartenir." msgid "`key_mangler` functions must be callable." msgstr "Les fonctions `key_mangler` doivent pouvoir être appelées." msgid "`key_mangler` option must be a function reference" msgstr "L'option `key_mangler` doit être une référence de fonction" msgid "any options" msgstr "toute option" msgid "auth_type is not Negotiate" msgstr "auth_type n'est pas Negotiate" msgid "authorizing user does not have role required" msgstr "l'utilisateur d'autorisation ne dispose pas du rôle requis" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "Impossible de créer un projet dans une branche qui contient un projet " "désactivé : %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "impossible de supprimer un projet activé faisant office de domaine. Veuillez " "d'abord désactiver le projet %s." #, python-format msgid "group %(group)s" msgstr "groupe %(group)s" msgid "" "idp_contact_type must be one of: [technical, other, support, administrative " "or billing." msgstr "" "idp_contact_type doit avoir l'une des valeurs suivantes : [technical, other, " "support, administrative ou billing." 
#, python-format msgid "invalid date format %s" msgstr "Format de date non valide %s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "il n'est pas autorisé d'avoir deux projets faisant office de domaines avec " "le même nom : %s" #, python-format msgid "" "it is not permitted to have two projects within a domain with the same " "name : %s" msgstr "" "il n'est pas autorisé d'avoir deux projets au sein d'un domaine avec le même " "nom : %s" msgid "only root projects are allowed to act as domains." msgstr "seuls les projets racine sont autorisés à faire office de domaines." #, python-format msgid "option %(option)s in group %(group)s" msgstr "option %(option)s dans le groupe %(group)s" msgid "provided consumer key does not match stored consumer key" msgstr "la clé du client fournie ne correspond pas à la clé du client stockée" msgid "provided request key does not match stored request key" msgstr "" "la clé de la demande fournie ne correspond pas à la clé de la demande stockée" msgid "provided verifier does not match stored verifier" msgstr "le vérificateur fourni ne correspond pas au vérificateur stocké" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses doit être un entier positif ou nul." 
msgid "remaining_uses must not be set if redelegation is allowed" msgstr "" "remaining_uses ne doit pas être défini si la redélégation est autorisée" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "demande de mise à jour du groupe %(group)s, mais la configuration fournie " "contient le groupe %(group_other)s à la place" msgid "rescope a scoped token" msgstr "Redéfinir la portée d'un jeton" #, python-format msgid "role %s is not defined" msgstr "Le rôle %s n'est pas défini" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "scope.project.id doit être spécifié si include_subtree est également spécifié" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s introuvable ou n'est pas un répertoire" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s introuvable ou n'est pas un fichier" #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "La référence de jeton doit être un type KeystoneToken, obtenu : %s" msgid "" "update of domain_id is deprecated as of Mitaka and will be removed in O." msgstr "" "la mise à jour de domain_id est obsolète depuis Mitaka et sera supprimée " "dans l'édition O." #, python-format msgid "" "validated expected to find %(param_name)r in function signature for " "%(func_name)r." msgstr "" "La validation s'attendait à rencontrer %(param_name)r dans la signature de " "fonction pour %(func_name)r." keystone-9.0.0/keystone/locale/keystone-log-critical.pot0000664000567000056710000000136312701407102024553 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # FIRST AUTHOR , 2015. 
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: keystone 8.0.0.0b3.dev14\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2015-08-01 06:07+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.0\n" #: keystone/catalog/backends/templated.py:106 #, python-format msgid "Unable to open template file %s" msgstr "" keystone-9.0.0/keystone/locale/el/0000775000567000056710000000000012701407246020225 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/el/LC_MESSAGES/0000775000567000056710000000000012701407246022012 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/el/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000167412701407102026561 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Efstathios Iosifidis , 2015 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-05 01:09+0000\n" "Last-Translator: Efstathios Iosifidis \n" "Language: el\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Greek\n" #, python-format msgid "Unable to open template file %s" msgstr "Αδυναμία ανοίγματος αρχείου προτύπου %s" keystone-9.0.0/keystone/locale/zh_TW/0000775000567000056710000000000012701407246020660 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/zh_TW/LC_MESSAGES/0000775000567000056710000000000012701407246022445 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000153212701407102027205 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "Unable to open template file %s" msgstr "無法開啟範本檔 %s" keystone-9.0.0/keystone/locale/zh_TW/LC_MESSAGES/keystone.po0000664000567000056710000014333512701407105024651 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Jennifer , 2016. #zanata # Lucas Palm , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev1\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-16 22:54+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-18 12:28+0000\n" "Last-Translator: Jennifer \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "%(driver)s is not supported driver version" msgstr "%(driver)s 不是受支援的驅動程式版本" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "%(entity)s 名稱不能包含下列保留字元:%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "%(event)s 不是有效的通知事件,必須是 %(actions)s 的其中之一" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s 不是授信儀表板主機" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s 未提供資料庫移轉。%(path)s 處的移轉儲存庫路徑不存在或者不是目" "錄。" #, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s" msgstr "%(prior_role_id)s 不暗示 %(implied_role_id)s" #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." msgstr "%(property_name)s 不能少於 %(min_length)s 個字元。" #, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "%(property_name)s 不是 %(display_expected_type)s" #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." 
msgstr "%(property_name)s 不應超過 %(max_length)s 個字元。" #, python-format msgid "%(role_id)s cannot be an implied roles" msgstr "%(role_id)s 不能是隱含角色" #, python-format msgid "%s cannot be empty." msgstr "%s 不能是空的。" #, python-format msgid "%s extension does not exist." msgstr "%s 延伸不存在。" #, python-format msgid "%s field is required and cannot be empty" msgstr "%s 欄位是必要欄位,因此不能是空的" #, python-format msgid "%s field(s) cannot be empty" msgstr "%s 欄位不能是空的" #, python-format msgid "" "%s for the LDAP identity backend has been deprecated in the Mitaka release " "in favor of read-only identity LDAP access. It will be removed in the \"O\" " "release." msgstr "" "LDAP 身分後端的 %s 在 Mitaka 版本中已予以淘汰,以支援唯讀身分 LDAP 存取。它將" "在 \"O\" 版本中予以移除。" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(停用 insecure_debug 模式,以暫停這些詳細資料。)" msgid "--all option cannot be mixed with other options" msgstr "--all 選項不能與其他選項混合" msgid "A project-scoped token is required to produce a service catalog." msgstr "需要專案範圍的記號來產生服務型錄。" msgid "Access token is expired" msgstr "存取記號過期" msgid "Access token not found" msgstr "找不到存取記號" msgid "Additional authentications steps required." msgstr "需要其他鑑別步驟。" msgid "An unexpected error occurred when retrieving domain configs" msgstr "擷取網域配置時發生非預期的錯誤" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "嘗試儲存 %s 時發生非預期的錯誤" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "發生非預期的錯誤,造成伺服器無法履行要求。" #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "發生非預期的錯誤,造成伺服器無法履行要求:%(exception)s" msgid "An unhandled exception has occurred: Could not find metadata." msgstr "發生無法處理的異常狀況:找不到 meta 資料。" msgid "At least one option must be provided" msgstr "必須提供至少一個選項" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "必須提供至少一個選項,請使用 --all 或 --domain-name" msgid "At least one role should be specified." 
msgstr "應該至少指定一個角色。" #, python-format msgid "" "Attempted automatic driver selection for assignment based upon " "[identity]\\driver option failed since driver %s is not found. Set " "[assignment]/driver to a valid driver in keystone config." msgstr "" "針對基於 [identity]\\driver 選項的指派,嘗試自動選取驅動程式失敗,因為找不到" "驅動程式 %s。請在 Keystone 配置中,將 [assignment]/driver 設為有效的驅動程" "式。" msgid "Attempted to authenticate with an unsupported method." msgstr "已嘗試使用不支援的方法進行鑑別。" msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "" "正在嘗試使用具有第 2 版身分服務的 OS-FEDERATION 記號,請使用第 3 版鑑別" msgid "Authentication plugin error." msgstr "鑑別外掛程式錯誤。" #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "" "後端 `%(backend)s` 不是有效的 memcached 後端。有效後端為:%(backend_list)s" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "無法使用透過委派發出之記號授權要求記號。" #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "無法變更 %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "無法變更網域 ID" msgid "Cannot change user ID" msgstr "無法變更使用者 ID" msgid "Cannot change user name" msgstr "無法變更使用者名稱" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "無法建立具有無效 URL %(url)s 的端點" #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "無法建立具有母項的專案:%(project_id)s" #, python-format msgid "" "Cannot create project, since it specifies its owner as domain %(domain_id)s, " "but specifies a parent in a different domain (%(parent_domain_id)s)." msgstr "" "無法建立專案,因為它指定自己的擁有者作為網域 %(domain_id)s,但卻指定了位於不" "同網域 (%(parent_domain_id)s) 中的母項。" #, python-format msgid "" "Cannot create project, since its parent (%(domain_id)s) is acting as a " "domain, but project's specified parent_id (%(parent_id)s) does not match " "this domain_id." 
msgstr "" "無法建立專案,因為它的母項 (%(domain_id)s) 正在充當網域,但專案的指定 " "parent_id (%(parent_id)s) 與此 domain_id 不符。" msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "無法刪除已啟用的網域,請先停用該網域。" #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "無法刪除專案 %(project_id)s,因為它的子樹狀結構包含已啟用的專案。" #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "無法刪除專案 %s,因為它不是階層中的葉節點。如果要刪除整個子樹狀結構,請使用重" "疊顯示選項。" #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "無法停用專案 %(project_id)s,因為它的子樹狀結構包含已啟用的專案。" #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "無法啟用專案 %s,因為它具有已停用的母項" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "無法列出由群組提供且依使用者 ID 進行過濾的指派。" msgid "Cannot list request tokens with a token issued via delegation." msgstr "無法列出含有透過委派發出之記號的要求記號。" #, python-format msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s" msgstr "無法開啟憑證 %(cert_file)s。原因:%(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "無法移除尚未授權的角色,%s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "屬性 limit 不在 hints 清單時,無法截斷驅動程式呼叫" msgid "Cannot update domain_id of a project that has children." msgstr "無法更新包含子項之專案的 domain_id。" msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "無法同時使用 parents_as_list 與 parents_as_ids查詢參數。" msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "無法同時使用 subtree_as_list 與 subtree_as_ids 查詢參數。" msgid "Cascade update is only allowed for enabled attribute." 
msgstr "只容許對已啟用的屬性進行重疊顯示更新。" msgid "" "Combining effective and group filter will always result in an empty list." msgstr "結合作用中的過濾器和群組過濾器將一律導致空清單。" msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "結合作用中的過濾器、網域過濾器及繼承的過濾器將一律導致空清單。" #, python-format msgid "Config API entity at /domains/%s/config" msgstr "在 /domains/%s/config 處配置 API 實體" #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "嘗試儲存 %(type)s 時發生衝突 - %(details)s" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "指定了相衝突的區域 ID:\"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "找不到消費者" #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target %(target)s" msgstr "無法變更目標 %(target)s 中固定不變的屬性 '%(attributes)s'" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." 
msgstr "" "無法判定身分提供者 ID。在要求環境中,找不到配置選項%(issuer_attribute)s。" #, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "在下列網域的網域配置中找不到 %(group_or_option)s:%(domain_id)s" #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "找不到端點群組:%(endpoint_group_id)s" msgid "Could not find Identity Provider identifier in environment" msgstr "在環境中找不到身分提供者 ID" #, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "找不到身分提供者:%(idp_id)s" #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "找不到服務提供者:%(sp_id)s" #, python-format msgid "Could not find credential: %(credential_id)s" msgstr "找不到認證:%(credential_id)s" #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "找不到網域:%(domain_id)s" #, python-format msgid "Could not find endpoint: %(endpoint_id)s" msgstr "找不到端點:%(endpoint_id)s" #, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "找不到下列身分提供者的聯合通訊協定 %(protocol_id)s:%(idp_id)s" #, python-format msgid "Could not find group: %(group_id)s" msgstr "找不到群組:%(group_id)s" #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "找不到對映:%(mapping_id)s" msgid "Could not find policy association" msgstr "找不到原則關聯" #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "找不到原則:%(policy_id)s" #, python-format msgid "Could not find project: %(project_id)s" msgstr "找不到專案:%(project_id)s" #, python-format msgid "Could not find region: %(region_id)s" msgstr "找不到區域:%(region_id)s" #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" "找不到具有角色 %(role_id)s、使用者或群組 %(actor_id)s、專案或網域 " "%(target_id)s 的角色指派" #, python-format msgid "Could not find role: %(role_id)s" msgstr "找不到角色:%(role_id)s" #, python-format msgid "Could not find service: %(service_id)s" msgstr 
"找不到服務:%(service_id)s" #, python-format msgid "Could not find token: %(token_id)s" msgstr "找不到記號:%(token_id)s" #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "找不到信任:%(trust_id)s" #, python-format msgid "Could not find user: %(user_id)s" msgstr "找不到使用者:%(user_id)s" #, python-format msgid "Could not find version: %(version)s" msgstr "找不到版本:%(version)s" #, python-format msgid "Could not find: %(target)s" msgstr "找不到:%(target)s" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "無法將任何聯合使用者內容對映至身分值。如需其他詳細資料,請檢查除錯日誌或使用" "的對映。" msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "設定暫時使用者身分時,無法對映使用者。對映規則必須指定使用者 ID/名稱,或者必" "須設定 REMOTE_USER環境變數。" msgid "Could not validate the access token" msgstr "無法驗證存取記號" msgid "Credential belongs to another user" msgstr "認證屬於另一個使用者" msgid "Credential signature mismatch" msgstr "認證簽章不符" #, python-format msgid "" "Direct import of auth plugin %(name)r is deprecated as of Liberty in favor " "of its entrypoint from %(namespace)r and may be removed in N." msgstr "" "不建議直接匯入鑑別外掛程式 %(name)r,因為 Liberty 支援它在 %(namespace)r 中的" "進入點且可能在 N 中予以移除。" #, python-format msgid "" "Direct import of driver %(name)r is deprecated as of Liberty in favor of its " "entrypoint from %(namespace)r and may be removed in N." msgstr "" "不建議直接匯入驅動程式 %(name)r,因為 Liberty 支援它在 %(namespace)r 中的進入" "點且可能在 N 中予以移除。" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." 
msgstr "正在停用配置已忽略其 'enable' 屬性的實體。" #, python-format msgid "Domain (%s)" msgstr "網域 (%s)" #, python-format msgid "Domain cannot be named %s" msgstr "網域不能命名為 %s" #, python-format msgid "Domain cannot have ID %s" msgstr "網域不能具有 ID %s" #, python-format msgid "Domain is disabled: %s" msgstr "已停用網域:%s" msgid "Domain name cannot contain reserved characters." msgstr "網域名稱不能包含保留字元。" msgid "Domain scoped token is not supported" msgstr "不支援網域範圍的記號" msgid "Domain specific roles are not supported in the V8 role driver" msgstr "網域專屬角色在第 8 版角色驅動程式中不受支援" #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "網域 %(domain)s 已定義配置 - 正在忽略檔案 %(file)s。" msgid "Duplicate Entry" msgstr "項目重複" #, python-format msgid "Duplicate ID, %s." msgstr "重複的 ID,%s。" #, python-format msgid "Duplicate entry: %s" msgstr "重複項目:%s" #, python-format msgid "Duplicate name, %s." msgstr "重複的名稱,%s。" #, python-format msgid "Duplicate remote ID: %s" msgstr "重複的遠端 ID:%s" msgid "EC2 access key not found." msgstr "找不到 EC2 存取金鑰。" msgid "EC2 signature not supplied." msgstr "未提供 EC2 簽章。" msgid "" "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set." msgstr "必須設定 --bootstrap-password 引數或 OS_BOOTSTRAP_PASSWORD。" msgid "Enabled field must be a boolean" msgstr "「已啟用」欄位必須是布林值" msgid "Enabled field should be a boolean" msgstr "「已啟用」欄位應該是布林值" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "在專案 %(project_id)s 中找不到端點 %(endpoint_id)s" msgid "Endpoint Group Project Association not found" msgstr "找不到端點群組專案關聯" msgid "Ensure configuration option idp_entity_id is set." msgstr "請確保已設定配置選項 idp_entity_id。" msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "請確保已設定配置選項 idp_sso_endpoint。" #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." 
msgstr "剖析網域 %(domain)s 的配置檔時發生錯誤,檔案:%(file)s。" #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "開啟檔案 %(path)s 時發生錯誤:%(err)s" #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "剖析行 '%(line)s' 時發生錯誤:%(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "剖析規則 %(path)s 時發生錯誤:%(err)s" #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "讀取 meta 資料檔時發生錯誤,%(reason)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "已超過嘗試登錄網域 %(domain)s 以使用 SQL 驅動程式的次數,似乎已經具有它的最後" "一個網域是 %(last_domain)s,將放棄" #, python-format msgid "Expected dict or list: %s" msgstr "預期字典或清單:%s" msgid "" "Expected signing certificates are not available on the server. Please check " "Keystone configuration." msgstr "伺服器上無法使用預期的簽署憑證。請檢查 Keystone 配置。" #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "預期在 %(target)s 中找到 %(attribute)s - 伺服器無法遵守要求,因為它的格式不正" "確。系統會假定用戶端處於錯誤狀態。" #, python-format msgid "Failed to start the %(name)s server" msgstr "無法啟動 %(name)s 伺服器" msgid "Failed to validate token" msgstr "無法驗證記號" msgid "Federation token is expired" msgstr "聯合記號過期" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "欄位 \"remaining_uses\" 設定為 %(value)s,但為了重新委派信任,不能設定該欄位" msgid "Found invalid token: scoped to both project and domain." 
msgstr "找到無效記號:已將範圍限定為專案及網域。" #, python-format msgid "Group %s not found in config" msgstr "在配置中找不到群組 %s" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "網域特定配置不支援群組 %(group)s" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "在後端找不到對映 %(mapping_id)s 所傳回的群組 %(group_id)s。" #, python-format msgid "" "Group membership across backend boundaries is not allowed, group in question " "is %(group_id)s, user is %(user_id)s" msgstr "" "不容許後端界限之間的群組成員資格,有問題的群組為%(group_id)s,使用者為 " "%(user_id)s" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "在 LDAP 物件 %(dn)s 中找不到 ID 屬性 %(id_attr)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "已停用身分提供者 %(idp)s" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "送入的身分提供者 ID 未包括在接受的 ID 中。" msgid "Invalid EC2 signature." msgstr "無效的 EC2 簽章。" #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "無效的 LDAP TLS 憑證選項:%(option)s。請選擇 %(options)s 的其中之一" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "無效的 LDAP TLS_AVAIL 選項:%s。TLS 無法使用" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "無效的 LDAP deref 選項:%(option)s。請選擇 %(options)s 的其中之一" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "無效的 LDAP 範圍:%(scope)s。請選擇 %(options)s 的其中之一" msgid "Invalid TLS / LDAPS combination" msgstr "無效的 TLS/LDAPS 組合" #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "審核資訊資料類型無效:%(data)s (%(type)s)" msgid "Invalid blob in credential" msgstr "認證中的二進位大型物件無效" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." 
msgstr "" "在配置檔名稱 %(file)s 中找到的網域名稱 %(domain)s 無效 - 正在忽略此檔案。" #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "網域特定配置無效:%(reason)s" #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "欄位 '%(path)s' 的輸入無效。值為 '%(value)s'。" msgid "Invalid limit value" msgstr "無效的限制值" #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service or " "Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s" msgstr "" "原則關聯的混合實體無效 - 僅容許「端點」、「服務」或「區域+服務」。要求為 -" "「端點」:%(endpoint_id)s,「服務」:%(service_id)s,「區域」:%(region_id)s" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "規則 %(identity_value)s 無效。必須指定 'groups' 及 'domain' 關鍵字。" msgid "Invalid signature" msgstr "無效的簽章" msgid "Invalid user / password" msgstr "無效的使用者/密碼" msgid "Invalid username or TOTP passcode" msgstr "無效的使用者名稱或 TOTP 密碼" msgid "Invalid username or password" msgstr "無效的使用者名稱或密碼" #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." msgstr "KVS 區域 %s 已配置。無法重新配置。" #, python-format msgid "Key Value Store not configured: %s" msgstr "未配置金鑰值儲存庫:%s" #, python-format msgid "LDAP %s create" msgstr "LDAP %s 建立" #, python-format msgid "LDAP %s delete" msgstr "LDAP %s 刪除" #, python-format msgid "LDAP %s update" msgstr "LDAP %s 更新" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "可轉換資源 ID 的長度大於 64(這是所容許的字元數目上限)" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." 
msgstr "" "對映 %(mapping_id)s 中的本端區段參照了一個不存在的遠端相符項(例如,本端區段" "中的 '{0}')。" #, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "金鑰 %(target)s 發生鎖定逾時" #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "鎖定金鑰必須與目標金鑰相符:%(lock)s 不等於 %(target)s" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "端點 URL (%(endpoint)s) 的格式不正確,請參閱錯誤日誌以取得詳細資料。" msgid "Marker could not be found" msgstr "找不到標記" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "已達到 %s 分支的階層深度上限。" #, python-format msgid "Maximum lock attempts on %s occurred." msgstr "已達到 %s 的鎖定嘗試次數上限。" #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "成員 %(member)s 已是群組 %(group)s 的成員" #, python-format msgid "Method not callable: %s" msgstr "方法不可呼叫:%s" msgid "Missing entity ID from environment" msgstr "環境中遺漏實體 ID" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "禁止在重新委派時修改 \"redelegation_count\"。建議省略此參數。" msgid "Multiple domains are not supported" msgstr "不支援多個網域" msgid "Must be called within an active lock context." msgstr "必須在作用中鎖定環境定義內呼叫。" msgid "Must specify either domain or project" msgstr "必須指定 Domain 或 Project" msgid "Name field is required and cannot be empty" msgstr "名稱欄位是必要欄位,因此不能是空的" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "既未提供「專案網域 ID」,也未提供「專案網域名稱」。" msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, if " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "找不到授權標頭,無法繼續進行 OAuth 相關呼叫,如果在 HTTPd 或 Apache 下執行," "請確保 WSGIPassAuthorization 設定為 On。" msgid "No authenticated user" msgstr "沒有已鑑別使用者" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." 
msgstr "找不到加密金鑰;請執行 keystone-manage fernet_setup 以引導一個。" msgid "No options specified" msgstr "未指定選項" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "沒有原則與端點 %(endpoint_id)s 相關聯。" #, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "沒有信任 %(trust_id)s 的剩餘使用情形" msgid "No token in the request" msgstr "要求中沒有記號" msgid "Non-default domain is not supported" msgstr "不支援非預設網域" msgid "One of the trust agents is disabled or deleted" msgstr "已停用或刪除其中一個信任代理程式" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "檢查網域配置要求時,發現選項 %(option)s 未指定任何群組" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "網域特定配置不支援群組 %(group)s 中的選項 %(option)s" #, python-format msgid "Project (%s)" msgstr "專案 (%s)" #, python-format msgid "Project ID not found: %(t_id)s" msgstr "找不到專案 ID:%(t_id)s" msgid "Project field is required and cannot be empty." msgstr "專案欄位是必要的,不能是空的。" #, python-format msgid "Project is disabled: %s" msgstr "已停用專案:%s" msgid "Project name cannot contain reserved characters." msgstr "專案名稱不能包含保留字元。" msgid "Query string is not UTF-8 encoded" msgstr "查詢字串未使用 UTF-8 進行編碼" #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "不支援讀取群組 %(group)s 中選項 %(option)s 的預設值" msgid "Redelegation allowed for delegated by trust only" msgstr "僅委派為信任時,才容許重新委派" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "剩餘的重新委派深度 %(redelegation_depth)d 超出容許的範圍 [0..%(max_count)d]" msgid "" "Remove admin_crud_extension from the paste pipeline, the admin_crud " "extension is now always available. Updatethe [pipeline:admin_api] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." 
msgstr "" "從貼上 Pipeline 中移除 admin_crud_extension,admin_crud 延伸現在將一律可用。" "相應地更新 keystone-paste.ini 中的 [pipeline:admin_api] 區段,因為它在 O 版本" "中將予以移除。" msgid "" "Remove endpoint_filter_extension from the paste pipeline, the endpoint " "filter extension is now always available. Update the [pipeline:api_v3] " "section in keystone-paste.ini accordingly as it will be removed in the O " "release." msgstr "" "從貼上 Pipeline 中移除 endpoint_filter_extension,端點過濾器延伸現在將一律可" "用。相應地更新 keystone-paste.ini 中的 [pipeline:api_v3] 區段,因為它在 O 版" "本中將予以移除。" msgid "" "Remove federation_extension from the paste pipeline, the federation " "extension is now always available. Update the [pipeline:api_v3] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "從貼上 Pipeline 中移除 federation_extension,聯合延伸現在將一律可用。相應地更" "新 keystone-paste.ini 中的 [pipeline:api_v3] 區段,因為它在 O 版本中將予以移" "除。" msgid "" "Remove oauth1_extension from the paste pipeline, the oauth1 extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "從貼上 Pipeline 中移除 oauth1_extension,oauth1 延伸現在將一律可用。相應地更" "新 keystone-paste.ini 中的 [pipeline:api_v3] 區段,因為它在 O 版本中將予以移" "除。" msgid "" "Remove revoke_extension from the paste pipeline, the revoke extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "從貼上 Pipeline 中移除 revoke_extension,撤銷延伸現在將一律可用。相應地更新 " "keystone-paste.ini 中的 [pipeline:api_v3] 區段,因為它在 O 版本中將予以移除。" msgid "" "Remove simple_cert from the paste pipeline, the PKI and PKIz token providers " "are now deprecated and simple_cert was only used insupport of these token " "providers. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." 
msgstr "" "從貼上 Pipeline 中移除 simple_cert,PKI 和 PKIz 記號提供者現在已予以淘汰,並" "且使用 simple_cert 的目的只是為了支援這些記號提供者。相應地更新 keystone-" "paste.ini 中的 [pipeline:api_v3] 區段,因為它在 O 版本中將予以移除。" msgid "" "Remove user_crud_extension from the paste pipeline, the user_crud extension " "is now always available. Updatethe [pipeline:public_api] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." msgstr "" "從貼上 Pipeline 中移除 user_crud_extension,user_crud 延伸現在將一律可用。相" "應地更新 keystone-paste.ini 中的 [pipeline:public_api] 區段,因為它在 O 版本" "中將予以移除。" msgid "Request Token does not have an authorizing user id" msgstr "要求記號不具有授權使用者 ID" #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. The " "server could not comply with the request because the attribute size is " "invalid (too large). The client is assumed to be in error." msgstr "" "要求屬性 %(attribute)s 必須少於或等於 %(size)i。伺服器無法遵守要求,因為屬性" "大小無效(太大)。系統會假定用戶端處於錯誤狀態。" msgid "Request must have an origin query parameter" msgstr "要求必須具有原始查詢參數" msgid "Request token is expired" msgstr "要求記號過期" msgid "Request token not found" msgstr "找不到要求記號" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "所要求的有效期限超過重新委派之信任可提供的有效期限" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "所要求的重新委派深度 %(requested_count)d 大於容許的 %(max_count)d" msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of running " "in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will " "be removed in the \"M\"-Release." 
msgstr "" "透過 eventlet 執行 Keystone 這一做法已遭淘汰,因為 Kilo 偏好在 WSGI 伺服器" "(例如,mod_wsgi)中執行 Keystone。將在\"M\" 版本中移除對在 eventlet 下執行 " "Keystone 的支援。" msgid "Scoping to both domain and project is not allowed" msgstr "不容許將範圍同時設定為網域及專案" msgid "Scoping to both domain and trust is not allowed" msgstr "不容許將範圍同時設定為網域及信任" msgid "Scoping to both project and trust is not allowed" msgstr "不容許將範圍同時設定為專案及信任" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "已停用服務提供者 %(sp)s" msgid "Some of requested roles are not in redelegated trust" msgstr "所要求的部分角色不在重新委派的信任中" msgid "Specify a domain or project, not both" msgstr "指定網域或專案,但不要同時指定兩者" msgid "Specify a user or group, not both" msgstr "指定使用者或群組,但不要同時指定兩者" msgid "Specify one of domain or project" msgstr "指定網域或專案" msgid "Specify one of user or group" msgstr "指定使用者或群組" #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "已超出字串長度。字串 '%(string)s' 的長度已超出直欄 %(type)s 的限制 " "(CHAR(%(length)d))。" msgid "Tenant name cannot contain reserved characters." msgstr "租戶名稱不能包含保留字元。" #, python-format msgid "" "The %s extension has been moved into keystone core and as such its " "migrations are maintained by the main keystone database control. Use the " "command: keystone-manage db_sync" msgstr "" "%s 延伸已移到 Keystone 核心內,因此它的移轉將由主要 Keystone 資料庫控制進行維" "護。請使用指令:keystone-manage db_sync" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "'expires_at' 不得早於現在。伺服器無法遵守要求,因為它的格式不正確,或者在其他" "方面發生錯誤。系統會假定用戶端處於錯誤狀態。" msgid "The --all option cannot be used with the --domain-name option" msgstr "--all 選項不能與 --domain-name 選項搭配使用" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." 
msgstr "找不到 Keystone 配置檔 %(config_file)s。" #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "Keystone 網域特定配置指定了多個SQL 驅動程式(僅允許一個):%(source)s。" msgid "The action you have requested has not been implemented." msgstr "尚未實作所要求的動作。" msgid "The authenticated user should match the trustor." msgstr "已鑑別使用者應該與委託人相符。" msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." msgstr "" "所要求的憑證無法使用。可能是此伺服器沒有使用 PKI 記號,否則,這是由於配置錯誤" "所造成。" msgid "The configured token provider does not support bind authentication." msgstr "所配置的記號提供者不支援連結鑑別。" msgid "The creation of projects acting as domains is not allowed in v2." msgstr "在第 2 版中,不容許建立專案以充當網域。" #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "密碼長度必須小於或等於 %(size)i。伺服器無法遵守要求,因為密碼無效。" msgid "The request you have made requires authentication." msgstr "您發出的要求需要鑑別。" msgid "The resource could not be found." msgstr "找不到資源。" msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "撤銷呼叫不得同時具有 domain_id 和 project_id。這是Keystone 伺服器中的錯誤。已" "中斷現行要求。" msgid "The service you have requested is no longer available on this server." msgstr "此伺服器上無法再使用所要求的服務。" #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." 
msgstr "指定的母項區域 %(parent_region_id)s 會建立循環區域階層。" #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "在配置中指定的群組 %(group)s 的值應該為選項字典" msgid "There should not be any non-oauth parameters" msgstr "不應該具有任何 non-oauth 參數" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "這不是已辨識的 Fernet 內容版本:%s" #, python-format msgid "This is not a recognized Fernet token %s" msgstr "這不是已辨識的 Fernet 記號 %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "時間戳記的格式不符合預期。伺服器無法遵守要求,因為它的格式不正確。系統會假定" "用戶端處於錯誤狀態。" #, python-format msgid "" "To get a more detailed information on this error, re-run this command for " "the specific domain, i.e.: keystone-manage domain_config_upload --domain-" "name %s" msgstr "" "若要取得此錯誤的更詳細資訊,請針對特定的網域重新執行此指令,例如:keystone-" "manage domain_config_upload --domain-name %s" msgid "Token belongs to another user" msgstr "記號屬於另一個使用者" msgid "Token does not belong to specified tenant." msgstr "記號不屬於所指定的 Tenant。" msgid "Token version is unrecognizable or unsupported." msgstr "無法辨識或不支援記號版本。" msgid "Trustee has no delegated roles." msgstr "受託人沒有委派的角色。" msgid "Trustor is disabled." 
msgstr "委託人已停用。" #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "正在嘗試更新群組 %(group)s,因此必須在配置中指定該群組且必須僅指定該群組" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "正在嘗試更新群組 %(group)s 中的選項 %(option)s,但提供的配置卻包含選項 " "%(option_other)s" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "正在嘗試更新群組 %(group)s 中的選項 %(option)s,因此必須在配置中指定該選項且" "必須僅指定該選項" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "無法存取 Keystone 資料庫,請檢查它是否已正確配置。" #, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." msgstr "無法耗用信任 %(trust_id)s,無法獲得鎖定。" #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "無法刪除區域 %(region_id)s,因為此區域或其子區域具有相關聯的端點。" msgid "Unable to downgrade schema" msgstr "無法將綱目降級" #, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "使用對映 %(mapping_id)s 時找不到有效的群組" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "找不到網域配置目錄:%s" #, python-format msgid "Unable to lookup user %s" msgstr "無法查閱使用者 %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "無法核對身分屬性 %(attribute)s,因為該屬性具有衝突的值 %(new)s 和 %(old)s" #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed, or this is the result of misconfiguration. Reason " "%(reason)s" msgstr "" "無法簽署 SAML 主張。此伺服器可能未安裝xmlsec1,或者這是配置錯誤的結果。原" "因: %(reason)s" msgid "Unable to sign token." 
msgstr "無法簽署記號。" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "發現非預期的指派類型:%s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" "非預期的授權屬性組合 - 使用者:%(user_id)s,群組:%(group_id)s,專案:" "%(project_id)s,網域:%(domain_id)s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "針對「JSON 起始目錄」回應要求了非預期狀態 %s" msgid "Unknown Target" msgstr "不明的目標" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "由 --domain-name 指定的網域 '%(name)s' 不明" #, python-format msgid "Unknown token version %s" msgstr "不明的記號版本 %s" #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "已取消登錄 %(targets)s 的相依關係:%(name)s" msgid "Update of `domain_id` is not allowed." msgstr "不容許更新 'domain_id'。" msgid "Update of `is_domain` is not allowed." msgstr "不容許更新 `is_domain`。" msgid "Update of `parent_id` is not allowed." msgstr "不容許更新 'parent_id'。" msgid "Update of domain_id is only allowed for root projects." msgstr "只容許更新根專案的 domain_id。" msgid "Update of domain_id of projects acting as domains is not allowed." msgstr "不容許更新正在充當網域之專案的 domain_id。" msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "嘗試建立 SAML 主張時,使用專案範圍的記號" msgid "" "Use of the identity driver config to automatically configure the same " "assignment driver has been deprecated, in the \"O\" release, the assignment " "driver will need to be expicitly configured if different than the default " "(SQL)." 
msgstr "" "不建議使用身分驅動程式配置來自動配置相同的指派驅動程式,在 \"O\" 版本中,如果" "指派驅動程式與預設值 (SQL) 不同,則需要明確配置指派驅動程式。" #, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "使用者 %(u_id)s 未獲承租人 %(t_id)s 的授權" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "使用者 %(user_id)s 無法存取網域 %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "使用者 %(user_id)s 無法存取專案 %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "使用者 %(user_id)s 已是群組 %(group_id)s 的成員" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "在群組 '%(group_id)s' 中找不到使用者 '%(user_id)s'" msgid "User IDs do not match" msgstr "使用者 ID 不符" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "無法建置使用者鑑別,因為遺漏了使用者 ID、具有網域 ID 的使用者名稱或具有網域名" "稱的使用者名稱。" #, python-format msgid "User is disabled: %s" msgstr "已停用使用者:%s" msgid "User is not a member of the requested project" msgstr "使用者並不隸屬於所要求的專案" msgid "User is not a trustee." msgstr "使用者不是受託人。" msgid "User not found" msgstr "找不到使用者" msgid "User not valid for tenant." msgstr "使用者不是有效的承租人。" msgid "User roles not supported: tenant_id required" msgstr "使用者角色不受支援:需要 tenant_id" #, python-format msgid "User type %s not supported" msgstr "使用者類型 %s 不受支援" msgid "You are not authorized to perform the requested action." msgstr "您未獲授權來執行所要求的動作。" #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "您未獲授權來執行所要求的動作:%(action)s" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "您已嘗試使用管理者記號建立資源。因為此記號不在網域內,所以您必須明確包含某個" "網域,以讓此資源屬於該網域。" msgid "`key_mangler` functions must be callable." 
msgstr "`key_mangler` 函數必須可呼叫。" msgid "`key_mangler` option must be a function reference" msgstr "`key_mangler` 選項必須是函數參照" msgid "any options" msgstr "任何選項" msgid "auth_type is not Negotiate" msgstr "auth_type 不是 Negotiate" msgid "authorizing user does not have role required" msgstr "授權使用者不具有必要的角色" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "無法在包含已停用專案的分支中建立專案:%s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "無法刪除已啟用且正在充當網域的專案。請先停用專案 %s。" #, python-format msgid "group %(group)s" msgstr "群組 %(group)s" msgid "" "idp_contact_type must be one of: [technical, other, support, administrative " "or billing." msgstr "idp_contact_type 必須是下列其中一個:技術、其他、支援、管理或計費。" #, python-format msgid "invalid date format %s" msgstr "無效的日期格式 %s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "不允許包含兩個具有相同名稱且充當網域的專案:%s" #, python-format msgid "" "it is not permitted to have two projects within a domain with the same " "name : %s" msgstr "在一個網域內,不允許包含兩個具有相同名稱的專案:%s" msgid "only root projects are allowed to act as domains." msgstr "只容許根專案充當網域。" #, python-format msgid "option %(option)s in group %(group)s" msgstr "群組 %(group)s 中的選項 %(option)s" msgid "provided consumer key does not match stored consumer key" msgstr "提供的消費者金鑰,與儲存的消費者金鑰不符" msgid "provided request key does not match stored request key" msgstr "提供的要求金鑰,與儲存的要求金鑰不符" msgid "provided verifier does not match stored verifier" msgstr "提供的驗證器,與儲存的驗證器不符" msgid "remaining_uses must be a positive integer or null." 
msgstr "remaining_uses 必須是正整數或空值。" msgid "remaining_uses must not be set if redelegation is allowed" msgstr "如果容許重新委派,則不得設定 remaining_uses" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "要求更新群組 %(group)s,但提供的配置卻包含群組 %(group_other)s" msgid "rescope a scoped token" msgstr "重新劃定已限定範圍之記號的範圍" #, python-format msgid "role %s is not defined" msgstr "未定義角色 %s" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "如果也指定了 include_subtree,則必須指定 scope.project.id" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s 找不到,或者不是目錄" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s 找不到,或者不是檔案" #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "記號參照必須是 KeystoneToken 類型,但卻取得:%s" msgid "" "update of domain_id is deprecated as of Mitaka and will be removed in O." msgstr "不建議更新 domain_id,因為 Mitaka 將在 O 版本中予以移除。" #, python-format msgid "" "validated expected to find %(param_name)r in function signature for " "%(func_name)r." msgstr "在下列函數的函數簽章中,驗證預期尋找 %(param_name)r:%(func_name)r。" keystone-9.0.0/keystone/locale/ko_KR/0000775000567000056710000000000012701407246020632 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/ko_KR/LC_MESSAGES/0000775000567000056710000000000012701407246022417 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-error.po0000664000567000056710000001410012701407105026514 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata # SeYeon Lee , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev8\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-24 10:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 01:53+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "" "Asked to convert a non-domain project into a domain - Domain: %(domain_id)s, " "Project ID: %(id)s, Project Name: %(project_name)s" msgstr "" "비도메인 프로젝트를 도메인으로 변환하도록 요청 - 도메인: %(domain_id)s, 프로" "젝트 ID: %(id)s, 프로젝트 이름: %(project_name)s" msgid "Cannot retrieve Authorization headers" msgstr "인증 헤더를 검색할 수 없음" #, python-format msgid "Circular reference found role inference rules - %(prior_role_id)s." msgstr "순환 참조에서 역할 추론 규칙 발견 - %(prior_role_id)s." #, python-format msgid "" "Circular reference or a repeated entry found in projects hierarchy - " "%(project_id)s." msgstr "" "프로젝트 계층 - %(project_id)s에서 순환 참조 또는 반복 항목을 발견했습니다." #, python-format msgid "" "Circular reference or a repeated entry found in region tree - %(region_id)s." msgstr "지역 트리에서 순환 참조 또는 반복 항목이 발견됨 - %(region_id)s." #, python-format msgid "" "Circular reference or a repeated entry found projects hierarchy - " "%(project_id)s." msgstr "순환 참조 또는 반복 항목에서 프로젝트 계층을 발견 - %(project_id)s." 
#, python-format msgid "Command %(to_exec)s exited with %(retcode)s - %(output)s" msgstr "명령 %(to_exec)s이(가) 종료되고 %(retcode)s - %(output)s이(가) 표시됨" #, python-format msgid "Could not bind to %(host)s:%(port)s" msgstr "%(host)s:%(port)s에 바인드할 수 없음" #, python-format msgid "" "Either [fernet_tokens] key_repository does not exist or Keystone does not " "have sufficient permission to access it: %s" msgstr "" "[fernet_tokens] key_repository가 없거나 Keystone에서 액세스할 권한이 충분하" "지 않음: %s" msgid "" "Error setting up the debug environment. Verify that the option --debug-url " "has the format : and that a debugger processes is listening on " "that port." msgstr "" "디버그 환경을 설정하는 중에 오류가 발생했습니다. --debug-url 옵션에 :" " 형식이 있으며 디버거 프로세스가 해당 포트에서 청취 중인지 확인하십시" "오." #, python-format msgid "Error when signing assertion, reason: %(reason)s%(output)s" msgstr "어설션에 서명할 때 오류 발생, 이유: %(reason)s%(output)s" msgid "Failed to construct notifier" msgstr "알리미를 구성하는 데 실패" msgid "" "Failed to create [fernet_tokens] key_repository: either it already exists or " "you don't have sufficient permissions to create it" msgstr "" "[fernet_tokens] key_repository 생성 실패: 이미 있거나 생성할 권한이 충분하지 " "않음" msgid "Failed to create the default domain." msgstr "기본 도메인을 생성하지 못했습니다." 
#, python-format msgid "Failed to remove file %(file_path)r: %(error)s" msgstr "파일 %(file_path)r을(를) 제거하는 데 실패: %(error)s" #, python-format msgid "Failed to send %(action)s %(event_type)s notification" msgstr "%(action)s %(event_type)s 알림을 보내는 데 실패" #, python-format msgid "Failed to send %(res_id)s %(event_type)s notification" msgstr "%(res_id)s %(event_type)s 알림을 보내는 데 실패" msgid "Failed to validate token" msgstr "토큰을 유효성 검증하지 못했음" #, python-format msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s" msgstr "형식이 잘못된 엔드포인트 %(url)s - 알 수 없는 키 %(keyerror)s" #, python-format msgid "" "Malformed endpoint %s - incomplete format (are you missing a type notifier ?)" msgstr "" "잘못된 형식의 엔드포인트 %s - 불완전한 형식(유형 알리미가 누락되었습니까?)" #, python-format msgid "" "Malformed endpoint '%(url)s'. The following type error occurred during " "string substitution: %(typeerror)s" msgstr "" "잘못된 형식의 엔드포인트 '%(url)s'입니다. 문자열 대체 중에 다음 입력 오류 발" "생: %(typeerror)s" #, python-format msgid "Malformed endpoint - %(url)r is not a string" msgstr "잘못된 형식의 엔드포인트 - %(url)r이(가) 문자열이 아님" #, python-format msgid "" "Reinitializing revocation list due to error in loading revocation list from " "backend. Expected `list` type got `%(type)s`. Old revocation list data: " "%(list)r" msgstr "" "백엔드에서 취소 목록을 로드하는 중에 발생한 오류로 인해 취소 목록을 다시 초기" "화합니다. 예상되는`list` 유형이 `%(type)s`이(가) 되었습니다. 이전 취소 목록 " "데이터: %(list)r" msgid "Server error" msgstr "서버 오류" #, python-format msgid "Unable to convert Keystone user or group ID. Error: %s" msgstr "Keystone 사용자 또는 그룹 ID를 변환할 수 없습니다. 
오류: %s" msgid "Unable to sign token" msgstr "토큰에 서명할 수 없음" #, python-format msgid "Unexpected error or malformed token determining token expiry: %s" msgstr "토큰 만료를 판별하는 잘못된 형식의 토큰 또는 예상치 못한 오류: %s" #, python-format msgid "" "Unexpected results in response for domain config - %(count)s responses, " "first option is %(option)s, expected option %(expected)s" msgstr "" "도메인 구성에 대한 응답의 예기치 않은 결과 - %(count)s 응답, 첫 번째 옵션 " "%(option)s, 예상 옵션 %(expected)s" keystone-9.0.0/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000156312701407102027163 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "Unable to open template file %s" msgstr "템플리트 파일 %s을(를) 열 수 없음" keystone-9.0.0/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-warning.po0000664000567000056710000003263612701407105027046 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Yongbok Kim , 2015 # OpenStack Infra , 2015. #zanata # SeYeon Lee , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev8\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-24 10:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 02:55+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "%s is not a dogpile.proxy.ProxyBackend" msgstr "%s이(가) dogpile.proxy.ProxyBackend가 아님" msgid "'local conf' from PasteDeploy INI is being ignored." msgstr "PasteDeploy INI의 'local conf'가 무시됩니다." msgid "" "Auth context already exists in the request environment; it will be used for " "authorization instead of creating a new one." msgstr "" "요청 환경에 인증 컨텍스트가 이미 있습니다. 새로 생성하지 않고 이 인증 컨텍스" "트를 인증에 사용합니다." #, python-format msgid "Authorization failed. %(exception)s from %(remote_addr)s" msgstr "%(remote_addr)s 에서 %(exception)s 인증에 실패 하였습니다." msgid "Couldn't find the auth context." msgstr "인증 컨텍스트를 찾을 수 없습니다." #, python-format msgid "" "Endpoint %(endpoint_id)s referenced in association for policy %(policy_id)s " "not found." msgstr "" "정책 %(policy_id)s의 연관에서 참조되는 엔드포인트 %(endpoint_id)s을(를) 찾을 " "수 없습니다." msgid "Failed to invoke ``openssl version``, assuming is v1.0 or newer" msgstr "v1.0 이상이라고 가정하여 ``openssl version``을 호출하는 데 실패" #, python-format msgid "" "Found multiple domains being mapped to a driver that does not support that " "(e.g. LDAP) - Domain ID: %(domain)s, Default Driver: %(driver)s" msgstr "" "여러 도메인이 드라이버에 맵핑되어 있음을 발견했지만, 이 드라이버에서 이 기능" "을 지원하지 않음(예: LDAP) - 도메인 ID: %(domain)s, 기본 드라이버: %(driver)s" #, python-format msgid "" "Found what looks like an incorrectly constructed config option substitution " "reference - domain: %(domain)s, group: %(group)s, option: %(option)s, value: " "%(value)s." 
msgstr "" "잘못 구성된 구성 옵션 대체 참조 발견 - 도메인: %(domain)s, 그룹: %(group)s, " "옵션: %(option)s, 값: %(value)s." #, python-format msgid "" "Found what looks like an unmatched config option substitution reference - " "domain: %(domain)s, group: %(group)s, option: %(option)s, value: %(value)s. " "Perhaps the config option to which it refers has yet to be added?" msgstr "" "일치하지 않는 구성 옵션 대체 발견 - 도메인: %(domain)s, 그룹: %(group)s, 옵" "션: %(option)s, 값: %(value)s. 참조하는 구성 옵션이 이미 추가되었을 가능성이 " "있습니다." #, python-format msgid "" "ID attribute %(id_attr)s for LDAP object %(dn)s has multiple values and " "therefore cannot be used as an ID. Will get the ID from DN instead" msgstr "" "LDAP 오브젝트 %(dn)s의 ID 속성 %(id_attr)s 값이 여러 개이므로, ID로 사용할 " "수 없습니다. 대신 DN에서 ID를 얻습니다." #, python-format msgid "Ignoring file (%s) while scanning domain config directory" msgstr "도메인 구성 디렉토리를 스캔하는 중에 파일(%s) 무시" msgid "Ignoring user name" msgstr "사용자 이름 무시" #, python-format msgid "" "Invalid additional attribute mapping: \"%s\". Format must be " ":" msgstr "" "잘못된 추가 속성 맵핑:\" %s\". 형식은 :" #, python-format msgid "Invalid domain name (%s) found in config file name" msgstr "설정 파일 이름에 잘못된 도메인 이름(%s)을 찾았습니다." msgid "" "It is recommended to only use the base key-value-store implementation for " "the token driver for testing purposes. Please use 'memcache' or 'sql' " "instead." msgstr "" "테스트용으로만 토큰 드라이버의 기본 key-value-store 구현을 사용하는 것이 좋습" "니다. 대신 'memcache' 또는 'sql'을 사용하십시오." #, python-format msgid "KVS lock released (timeout reached) for: %s" msgstr "%s에 대한 KVS 잠금이 해제됨(제한시간에 도달)" msgid "" "LDAP Server does not support paging. Disable paging in keystone.conf to " "avoid this message." msgstr "" "LDAP 서버가 페이징을 지원하지 않습니다. 이 메시지를 방지하려면 keystone.conf" "에서 페이징을 사용 안함으로 설정하십시오." 
msgid "No domain information specified as part of list request" msgstr "목록 요청의 일부로 도메인 정보가 지정되지 않음" msgid "" "Not specifying a domain during a create user, group or project call, and " "relying on falling back to the default domain, is deprecated as of Liberty " "and will be removed in the N release. Specify the domain explicitly or use a " "domain-scoped token" msgstr "" "사용자, 그룹 또는 프로젝트 호출 생성 중에 도메인을 지정하지 않고, 기본 도메인" "으로 다시 돌아가는 기능은 Liberty에서는 더 이상 사용되지 않으므로 N 릴리스에" "서 제거됩니다. 도메인을 명시적으로 지정하거나 도메인 범위 토큰을 사용하십시" "오." #, python-format msgid "" "Policy %(policy_id)s referenced in association for endpoint %(endpoint_id)s " "not found." msgstr "" "엔드포인트 %(endpoint_id)s의 연관에서 참조되는 정책 %(policy_id)s을(를) 찾을 " "수 없습니다." #, python-format msgid "Project %s does not exist and was not deleted." msgstr "프로젝트 %s이(가) 없으므로 삭제되지 않았습니다." msgid "RBAC: Bypassing authorization" msgstr "RBAC: 권한 무시" msgid "RBAC: Invalid token" msgstr "RBAC: 올바르지 않은 토큰" msgid "RBAC: Invalid user data in token" msgstr "RBAC: 토큰에 잘못된 사용자 데이터" #, python-format msgid "" "Removing `%s` from revocation list due to invalid expires data in revocation " "list." msgstr "" "유효하지 않아 취소 목록에서 `%s`을(를) 제거하면 취소 목록의 데이터가 만료됩니" "다." msgid "" "The admin_token_auth middleware presents a security risk and should be " "removed from the [pipeline:api_v3], [pipeline:admin_api], and [pipeline:" "public_api] sections of your paste ini file." msgstr "" "admin_token_auth 미들웨어에서는 보안 위험이 제기되므로 paste ini 파일의 " "[pipeline:api_v3], [pipeline:admin_api] 및 [pipeline:public_api] 섹션에서 제" "거해야 합니다." msgid "" "The default domain was created automatically to contain V2 resources. This " "is deprecated in the M release and will not be supported in the O release. " "Create the default domain manually or use the keystone-manage bootstrap " "command." msgstr "" "V2 자원을 포함하도록 기본 도메인이 자동으로 생성되었습니다. 이 기능은 M 릴리" "스에서 더 이상 사용되지 않으며 O 릴리스에서 지원되지 않습니다. 수동으로 기본 " "도메인을 생성하거나 keystone-manage 부트스트랩 명령을 사용하십시오." 
#, python-format msgid "Token `%s` is expired, not adding to the revocation list." msgstr "토큰 `%s`를 해지 목록에 추가 하지 않으면 만료 됩니다." #, python-format msgid "Truncating user password to %d characters." msgstr "사용자 비밀번호를 %d자로 자릅니다." #, python-format msgid "Unable to add user %(user)s to %(tenant)s." msgstr "%(tenant)s 에 사용자 %(user)s 를 추가 할 수 없습니다." #, python-format msgid "" "Unable to change the ownership of [fernet_tokens] key_repository without a " "keystone user ID and keystone group ID both being provided: %s" msgstr "" "keystone 사용자 ID와 keystone 그룹 ID가 모두 제공되지 않으면 [fernet_tokens] " "key_repository의 소유권은 변경할 수 없음: %s" #, python-format msgid "" "Unable to change the ownership of the new key without a keystone user ID and " "keystone group ID both being provided: %s" msgstr "" "keystone 사용자 ID와 keystone 그룹 ID가 모두 제공되지 않으면 새 키의 소유권" "을 변경할 수 없음: %s" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "%s: 도메인 설정 디렉토리를 찾을 수 없습니다." #, python-format msgid "Unable to remove user %(user)s from %(tenant)s." msgstr "%(tenant)s 에서 %(user)s 를 제거 할 수 없습니다." #, python-format msgid "" "Unsupported policy association found - Policy %(policy_id)s, Endpoint " "%(endpoint_id)s, Service %(service_id)s, Region %(region_id)s, " msgstr "" "지원되지 않는 정책 연관 발견 - 정책 %(policy_id)s, 엔드포인트 " "%(endpoint_id)s, 서비스 %(service_id)s, 지역 %(region_id)s, " #, python-format msgid "" "User %(user_id)s doesn't have access to default project %(project_id)s. The " "token will be unscoped rather than scoped to the project." msgstr "" "사용자 %(user_id)s이(가) 기본 프로젝트 %(project_id)s에 대한 액세스 권한이 없" "습니다. 토큰의 범위가 프로젝트로 지정되지 않고 범위 지정이 해제됩니다." #, python-format msgid "" "User %(user_id)s's default project %(project_id)s is disabled. The token " "will be unscoped rather than scoped to the project." msgstr "" "%(user_id)s 사용자의 기본 프로젝트 %(project_id)s을(를) 사용하지 않습니다. 토" "큰의 범위가 프로젝트로 지정되지 않고 범위 지정이 해제됩니다." #, python-format msgid "" "User %(user_id)s's default project %(project_id)s not found. 
The token will " "be unscoped rather than scoped to the project." msgstr "" "사용자 %(user_id)s의 기본 프로젝트 %(project_id)s을(를) 찾을 수 없습니다. 토" "큰의 범위가 프로젝트로 지정되지 않고 범위 지정이 해제됩니다." #, python-format msgid "" "When deleting entries for %(search_base)s, could not delete nonexistent " "entries %(entries)s%(dots)s" msgstr "" "%(search_base)s의 항목을 삭제할 때 존재하지 않는 항목 %(entries)s%(dots)s을" "(를) 삭제할 수 없음" #, python-format msgid "[fernet_tokens] key_repository is world readable: %s" msgstr "[fernet_tokens] key_repository는 읽을 수 있음: %s" msgid "" "[fernet_tokens] max_active_keys must be at least 1 to maintain a primary key." msgstr "" "기본 키를 유지 보수하려면 [fernet_tokens] max_active_keys가 최소 1이어야 합니" "다." #, python-format msgid "" "`token_api.%s` is deprecated as of Juno in favor of utilizing methods on " "`token_provider_api` and may be removed in Kilo." msgstr "" "Juno에서는 `token_provider_api`의 메소드를 활용하기 위해 `token_api.%s`이" "(가) 더 이상 사용되지 않으므로 Kilo에서 제거될 수 있습니다." msgid "" "build_auth_context middleware checking for the admin token is deprecated as " "of the Mitaka release and will be removed in the O release. If your " "deployment requires use of the admin token, update keystone-paste.ini so " "that admin_token_auth is before build_auth_context in the paste pipelines, " "otherwise remove the admin_token_auth middleware from the paste pipelines." msgstr "" "build_auth_context 미들웨어에서 관리 토큰을 확인하는 기능은 Mitaka 릴리스에" "서 더 이상 사용되지 않으므로, O 릴리스에서 제거됩니다. 배포에서 관리 토큰을 " "사용해야 하는 경우 붙여넣기 파이프라인에서 build_auth_context 전에 " "admin_token_auth가 오도록 keystone-paste.ini를 업데이트하십시오. 그렇지 않으" "면 붙여넣기 파이프라인에서 admin_token_auth 미들웨어를 제거하십시오." #, python-format msgid "" "delete_domain_assignments method not found in custom assignment driver. " "Domain assignments for domain (%s) to users from other domains will not be " "removed. This was added in V9 of the assignment driver." msgstr "" "사용자 정의 할당 드라이버에서 delete_domain_assignments 메소드를 찾을 수 없습" "니다. 다른 도메인의 사용자에게 할당한 도메인(%s)은 제거되지 않습니다. 이 기능" "은 할당 드라이버의 V9에서 추가되었습니다." 
msgid "" "insecure_debug is enabled so responses may include sensitive information." msgstr "insecure_debug가 사용되므로 응답에 민감한 정보가 포함될 수 있습니다." msgid "" "keystone-manage pki_setup is deprecated as of Mitaka in favor of not using " "PKI tokens and may be removed in 'O' release." msgstr "" " Mitaka에서 PKI 토큰을 사용하지 않기 위해 keystone-manage pki_setup이 더 이" "상 사용되지 않으므로, 'O' 릴리스에서 제거할 수 있습니다." msgid "keystone-manage pki_setup is not recommended for production use." msgstr "keystone-manage pki_setup은 프로덕션에서 사용하지 않는 것이 좋습니다.." msgid "keystone-manage ssl_setup is not recommended for production use." msgstr "keystone-manage ssl_setup은 프로덕션에서 사용하지 않는 것이 좋습니다." msgid "missing exception kwargs (programmer error)" msgstr "누락된 예외 kwargs(프로그래머 오류)" keystone-9.0.0/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-info.po0000664000567000056710000001522312701407105026325 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Yongbok Kim , 2015 # OpenStack Infra , 2015. #zanata # SeYeon Lee , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev8\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-24 10:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 02:14+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "" "\"expires_at\" has conflicting values %(existing)s and %(new)s. Will use " "the earliest value." msgstr "" "\"expires_at\"에 충돌되는 값 %(existing)s 및 %(new)s이(가) 있습니다. 가장 이" "른 값을 사용합니다." #, python-format msgid "Adding proxy '%(proxy)s' to KVS %(name)s." msgstr "KVS %(name)s에 프록시 '%(proxy)s'을(를) 추가합니다." 
#, python-format msgid "Cannot find client issuer in env by the issuer attribute - %s." msgstr "" "발행자 속성 - %s을(를) 사용하여 환경에서 클라이언트 발행자를 찾을 수 없습니" "다." #, python-format msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}" msgstr "알 수 없는 바인드를 확인할 수 없음: {%(bind_type)s: %(identifier)s}" #, python-format msgid "Created %(interface)s endpoint %(url)s" msgstr "%(interface)s 엔드포인트 %(url)s이(가)생성됨" #, python-format msgid "Created Region %s" msgstr "지역 %s이(가) 생성됨" #, python-format msgid "Created Role %s" msgstr "역할 %s이(가) 생성됨" #, python-format msgid "Created a new key: %s" msgstr "새로운 키 생성: %s" #, python-format msgid "Created domain %s" msgstr "도메인 %s이(가) 생성됨" #, python-format msgid "Created project %s" msgstr "프로젝트 %s이(가) 생성됨" #, python-format msgid "Created user %s" msgstr "사용자 \"%s\"이(가) 생성됨" #, python-format msgid "Creating the default role %s because it does not exist." msgstr "기본 역할 %s이(가) 없으므로 작성합니다." #, python-format msgid "Creating the default role %s failed because it was already created" msgstr "기본 역할 %s이(가) 이미 생성되었으므로 작성에 실패" #, python-format msgid "Current primary key is: %s" msgstr "현재 기본 키: %s" #, python-format msgid "Domain %s already exists, skipping creation." msgstr "도메인 %s이(가) 이미 있으므로, 생성을 건너뜁니다." #, python-format msgid "Excess key to purge: %s" msgstr "제거할 초과 키: %s" #, python-format msgid "" "Fernet token created with length of %d characters, which exceeds 255 " "characters" msgstr "길이가 255자를 초과하는 %d자로 Fernet 토큰이 생성됨" #, python-format msgid "Granted %(role)s on %(project)s to user %(username)s." msgstr "" "%(project)s에 대한 %(role)s이(가) 사용자 %(username)s에 부여되었습니다." #, python-format msgid "KVS region %s key_mangler disabled." msgstr "KVS 지역 %s key_mangler가 사용되지 않습니다." 
msgid "Kerberos bind authentication successful" msgstr "Kerberos 바인드 인증 성공" msgid "Kerberos credentials do not match those in bind" msgstr "Kerberos 자격 증명이 바인드에 있는 자격 증명과 일치하지 않음" msgid "Kerberos credentials required and not present" msgstr "Kerberos 자격 증명이 필요하지만 없음" msgid "Key repository is already initialized; aborting." msgstr "키 저장소가 이미 초기화되었습니다. 중단합니다." #, python-format msgid "" "Loaded %(count)d encryption keys (max_active_keys=%(max)d) from: %(dir)s" msgstr "%(dir)s에서 %(count)d 암호화 키(max_active_keys=%(max)d)를 로드함" #, python-format msgid "Named bind mode %s not in bind information" msgstr "바인드 정보에 이름 지정된 바인드 모드 %s이(가) 없음" #, python-format msgid "Next primary key will be: %s" msgstr "다음 기본 키: %s" msgid "No bind information present in token" msgstr "토큰에 바인드 정보가 없음" #, python-format msgid "Project %s already exists, skipping creation." msgstr "프로젝트 %s이(가) 이미 있으므로, 생성을 건너뜁니다." #, python-format msgid "Promoted key 0 to be the primary: %s" msgstr "승격된 키 0이 기본이 됨: %s" #, python-format msgid "Region %s exists, skipping creation." msgstr "지역 %s이(가) 이미 있으므로, 생성을 건너뜁니다." #, python-format msgid "Role %s exists, skipping creation." msgstr "역할 %s이(가) 이미 있으므로, 생성을 건너뜁니다." 
#, python-format msgid "Running command - %s" msgstr "%s - 명령 실행" #, python-format msgid "Scanning %r for domain config files" msgstr "%r에서 도메인 구성 파일 스캔" #, python-format msgid "Skipping %s endpoint as already created" msgstr "%s 엔드포인트가 이미 생성되었으므로 건너뜀" #, python-format msgid "Starting %(arg0)s on %(host)s:%(port)s" msgstr "%(host)s:%(port)s에서 %(arg0)s 시작 중" #, python-format msgid "Starting key rotation with %(count)s key files: %(list)s" msgstr "%(count)s 키 파일로 키 순환 시작: %(list)s" #, python-format msgid "" "The client issuer %(client_issuer)s does not match with the trusted issuer " "%(trusted_issuer)s" msgstr "" "클라이언트 발행자 %(client_issuer)s이(가) 신뢰할 수 있는 발행자 " "%(trusted_issuer)s과(와) 일치하지 않음" #, python-format msgid "Total expired tokens removed: %d" msgstr "제거된 만료 토큰 총계: %d" #, python-format msgid "User %(username)s already has %(role)s on %(project)s." msgstr "" "사용자 %(username)s이(가) 이미 %(project)s에 대한 %(role)s이(가) 있습니다." #, python-format msgid "User %s already exists, skipping creation." msgstr "사용자 %s이(가) 이미 있으므로, 생성을 건너뜁니다." #, python-format msgid "Using %(func)s as KVS region %(name)s key_mangler" msgstr "%(func)s을(를) KVS region %(name)s key_mangler(으)로 사용" #, python-format msgid "" "Using default keystone.common.kvs.sha1_mangle_key as KVS region %s " "key_mangler" msgstr "" "기본 keystone.common.kvs.sha1_mangle_key을(를) KVS 지역 %s key_mangler(으)로 " "사용" msgid "" "[fernet_tokens] key_repository does not appear to exist; attempting to " "create it" msgstr "" "[fernet_tokens] key_repository가 없는 것으로 보입니다. 생성하려고 시도합니다." keystone-9.0.0/keystone/locale/ko_KR/LC_MESSAGES/keystone.po0000664000567000056710000016136112701407105024622 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Lucas Palm , 2016. 
#zanata # SeYeon Lee , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0rc2.dev8\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-03-24 10:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 01:32+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "%(driver)s is not supported driver version" msgstr "%(driver)s은(는) 지원되는 드라이버 버전이 아님" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "%(entity)s 이름에는 다음과 같은 예약 문자가 포함될 수 없음: %(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s은(는) 올바른 알림 이벤트가 아니며 %(actions)s 중 하나여야 합니다." #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s이(가) 신뢰 대시보드 호스트가 아님" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s은(는) 데이터베이스 마이그레이션을 제공하지 않습니다. 마이그레이" "션 저장소 경로가 %(path)s에 존재하지 않거나 디렉토리가 아닙니다." #, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s" msgstr "%(prior_role_id)s은(는) %(implied_role_id)s을(를) 내포하지 않음" #, python-format msgid "%(property_name)s cannot be less than %(min_length)s characters." msgstr "%(property_name)s은(는) %(min_length)s자 미만일 수 없습니다. 
" #, python-format msgid "%(property_name)s is not a %(display_expected_type)s" msgstr "%(property_name)s이(가) %(display_expected_type)s이(가) 아님" #, python-format msgid "%(property_name)s should not be greater than %(max_length)s characters." msgstr "%(property_name)s은(는) %(max_length)s자 이하여야 합니다. " #, python-format msgid "%(role_id)s cannot be an implied roles" msgstr "%(role_id)s은(는) 내포된 역할일 수 없음" #, python-format msgid "%s cannot be empty." msgstr "%s은(는) 공백일 수 없습니다. " #, python-format msgid "%s extension does not exist." msgstr "%s 확장자가 존재하지 않습니다." #, python-format msgid "%s field is required and cannot be empty" msgstr "%s 필드가 필요하며 비어 있을 수 없음" #, python-format msgid "%s field(s) cannot be empty" msgstr "%s 필드는 비어 있을 수 없음" #, python-format msgid "" "%s for the LDAP identity backend has been deprecated in the Mitaka release " "in favor of read-only identity LDAP access. It will be removed in the \"O\" " "release." msgstr "" "Mitaka 릴리스에서는 읽기 전용 ID LDAP 액세스를 사용하기 위해 LDAP ID 백엔드" "의 %s이(가) 더 이상 사용되지 않으므로, \"O\" 릴리스에서 제거됩니다." msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "" "(이러한 세부사항을 억제하려면 insecure_debug 모드를 사용 안함으로 설정하십시" "오.)" msgid "--all option cannot be mixed with other options" msgstr "--all 옵션은 다른 옵션과 함께 사용할 수 없음" msgid "A project-scoped token is required to produce a service catalog." msgstr "서비스 카탈로그를 생성하려면 프로젝트 범위 토큰이 필요합니다." msgid "Access token is expired" msgstr "액세스 토큰이 만료됨" msgid "Access token not found" msgstr "액세스 토큰을 찾을 수 없음" msgid "Additional authentications steps required." msgstr "추가 인증 단계가 필요합니다." msgid "An unexpected error occurred when retrieving domain configs" msgstr "도메인 구성 검색 중 예상치 못한 오류 발생" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "%s을(를) 저장하려 할 때 예기치 않은 오류가 발생했음" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "예상치 않은 오류가 발생하여 서버가 사용자 요청을 이행하지 못함." 
#, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s" msgstr "" "예상치 않은 오류가 발생하여 서버가 사용자 요청을 이행하지 못함:%(exception)s" msgid "An unhandled exception has occurred: Could not find metadata." msgstr "처리되지 않은 예외가 발생함: 메타데이터를 찾을 수 없음." msgid "At least one option must be provided" msgstr "하나 이상의 옵션을 제공해야 함" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "하나 이상의 옵션을 제공해야 합니다. --all 또는 --domain-name을 사용하십시오. " msgid "At least one role should be specified." msgstr "최소한 하나의 역할을 지정해야 합니다." #, python-format msgid "" "Attempted automatic driver selection for assignment based upon " "[identity]\\driver option failed since driver %s is not found. Set " "[assignment]/driver to a valid driver in keystone config." msgstr "" "드라이버 %s을(를) 찾을 수 없으므로 [identity]\\driver 옵션을 기반으로 할당할 " "드라이버를 자동으로 선택하는 데 실패했습니다. keystone 구성에서 [assignment]/" "driver를 올바른 드라이버로 설정하십시오." msgid "Attempted to authenticate with an unsupported method." msgstr "지원되지 않는 방법으로 인증을 시도했습니다." msgid "" "Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 " "Authentication" msgstr "" "V2 ID 서비스에서 OS-FEDERATION 토큰을 사용할 경우 V3 인증을 사용하십시오." msgid "Authentication plugin error." msgstr "인증 플러그인 오류." #, python-format msgid "" "Backend `%(backend)s` is not a valid memcached backend. Valid backends: " "%(backend_list)s" msgstr "" "백엔드 `%(backend)s`이(가) 올바른 memcached 백엔드가 아닙니다. 올바른 백엔" "드: %(backend_list)s" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "위임을 통해 발행된 토큰으로 요청 토큰에 권한을 부여할 수 없습니다." 
#, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "%(option_name)s %(attr)s을(를) 변경할 수 없음" msgid "Cannot change Domain ID" msgstr "도메인 ID를 변경할 수 없음" msgid "Cannot change user ID" msgstr "사용자 ID를 변경할 수 없음" msgid "Cannot change user name" msgstr "사용자 이름을 변경할 수 없음" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s" msgstr "올바르지 않은 URL을 사용하여 엔드포인트를 작성할 수 없음: %(url)s" #, python-format msgid "Cannot create project with parent: %(project_id)s" msgstr "상위로 프로젝트를 작성할 수 없음: %(project_id)s" #, python-format msgid "" "Cannot create project, since it specifies its owner as domain %(domain_id)s, " "but specifies a parent in a different domain (%(parent_domain_id)s)." msgstr "" "소유자를 도메인 %(domain_id)s(으)로 지정하지만 다른 도메인 " "(%(parent_domain_id)s)의 상위를 지정하므로 프로젝트를 생성할 수 없습니다." #, python-format msgid "" "Cannot create project, since its parent (%(domain_id)s) is acting as a " "domain, but project's specified parent_id (%(parent_id)s) does not match " "this domain_id." msgstr "" "상위(%(domain_id)s)가 도메인 역할을 수행하지만 프로젝트 지정 " "parent_id(%(parent_id)s)가 이 domain_id와 일치하지 않으므로 프로젝트를 생성" "할 수 없습니다." msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "사용으로 설정된 도메인을 삭제할 수 없습니다. 먼저 해당 도메인을 사용 안함으" "로 설정하십시오." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "서브트리에 사용 설정된 프로젝트가 있으므로 프로젝트 %(project_id)s을(를) 삭제" "할 수 없습니다." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "계층 구조의 리프가 아니므로 프로젝트 %s을(를) 삭제할 수 없습니다. 전체 하위 " "트리를 삭제하려면 계단식 옵션을 사용하십시오." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "서브트리에 사용 설정된 프로젝트가 있으므로 프로젝트 %(project_id)s을(를) 사" "용 안함으로 설정할 수 없습니다." 
#, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "프로젝트 %s에 사용 안함으로 설정된 상위가 있어서 이를 사용할 수 없음" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "그룹에서 소스가 공급되고 사용자 ID별로 필터링된 할당을 나열할 수 없습니다." msgid "Cannot list request tokens with a token issued via delegation." msgstr "위임을 통해 발행된 토큰으로 요청 토큰을 나열할 수 없습니다." #, python-format msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s" msgstr "%(cert_file)s 인증서를 열수 없습니다. 이유: %(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "권한이 부여되지 않은 역할을 제거할 수 없음: %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "자신 뒤의 첫 번째 매개변수와 같은 힌트 목록 없이 드라이버 호출을 자를 수 없음" msgid "Cannot update domain_id of a project that has children." msgstr "하위가 있는 프로젝트의 domain_id를 업데이트할 수 없습니다." msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "parents_as_list 및 parents_as_ids 조회 매개변수를 동시에 사용할 수 없습니다." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "subtree_as_list 및 subtree_as_ids 조회 매개변수를 동시에 사용할 수 없습니다." msgid "Cascade update is only allowed for enabled attribute." msgstr "사용된 속성에만 계단식 업데이트가 허용됩니다." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "결합에 효율적인 그룹 필터는 항상 빈 목록을 생성합니다." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "결합에 효율적인 도메인과 상속 필터는 항상 빈 목록을 생성합니다." 
#, python-format msgid "Config API entity at /domains/%s/config" msgstr "/domains/%s/config의 구성 API 엔티티" #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s" msgstr "%(type)s을(를) 저장하는 중에 충돌이 발생함 - %(details)s" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "지정된 리젼 ID가 충돌함: \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "이용자를 찾을 수 없음" #, python-format msgid "" "Could not change immutable attribute(s) '%(attributes)s' in target %(target)s" msgstr "%(target)s 대상에서 불변 속성 '%(attributes)s'을(를) 변경할 수 없음" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "ID 제공자 ID를 판별할 수 없습니다. 구성 옵션 %(issuer_attribute)s이(가) 요청 " "환경에 없습니다. " #, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s" msgstr "" "다음 도메인의 도메인 구성에서 %(group_or_option)s을(를) 찾을 수 없습니다. 
" "%(domain_id)s" #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s" msgstr "엔드포인트 그룹을 찾을 수 없음: %(endpoint_group_id)s" msgid "Could not find Identity Provider identifier in environment" msgstr "환경에서 ID 제공자의 ID를 찾을 수 없음" #, python-format msgid "Could not find Identity Provider: %(idp_id)s" msgstr "%(idp_id)s ID 제공자를 찾을 수 없음" #, python-format msgid "Could not find Service Provider: %(sp_id)s" msgstr "서비스 제공자를 찾을 수 없음: %(sp_id)s" #, python-format msgid "Could not find credential: %(credential_id)s" msgstr "%(credential_id)s 신임 정보를 찾을 수 없음" #, python-format msgid "Could not find domain: %(domain_id)s" msgstr "%(domain_id)s 도메인을 찾을 수 없음" #, python-format msgid "Could not find endpoint: %(endpoint_id)s" msgstr "%(endpoint_id)s 엔드포인트를 찾을 수 없음" #, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s" msgstr "" "ID 제공자 %(idp_id)s에 대한 연합 프로토콜 %(protocol_id)s을(를) 찾을 수 없음" #, python-format msgid "Could not find group: %(group_id)s" msgstr "%(group_id)s 그룹을 찾을 수 없음" #, python-format msgid "Could not find mapping: %(mapping_id)s" msgstr "%(mapping_id)s 맵핑을 찾을 수 없음" msgid "Could not find policy association" msgstr "정책 연관을 찾을 수 없음" #, python-format msgid "Could not find policy: %(policy_id)s" msgstr "%(policy_id)s 정책을 찾을 수 없음" #, python-format msgid "Could not find project: %(project_id)s" msgstr "%(project_id)s 프로젝트를 찾을 수 없음" #, python-format msgid "Could not find region: %(region_id)s" msgstr "%(region_id)s 리젼을 찾을 수 없음" #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project or domain: %(target_id)s" msgstr "" "%(role_id)s 역할에 대한 역할 지정을 찾을 수 없음. 
사용자 또는 그룹: " "%(actor_id)s, 프로젝트 또는 도메인: %(target_id)s" #, python-format msgid "Could not find role: %(role_id)s" msgstr "%(role_id)s 규칙을 찾을 수 없음" #, python-format msgid "Could not find service: %(service_id)s" msgstr "%(service_id)s 서비스를 찾을 수 없음" #, python-format msgid "Could not find token: %(token_id)s" msgstr "%(token_id)s 토큰을 찾을 수 없음" #, python-format msgid "Could not find trust: %(trust_id)s" msgstr "%(trust_id)s 신뢰를 찾을 수 없음" #, python-format msgid "Could not find user: %(user_id)s" msgstr "%(user_id)s 사용자를 찾을 수 없음" #, python-format msgid "Could not find version: %(version)s" msgstr "%(version)s 버전을 찾을 수 없음" #, python-format msgid "Could not find: %(target)s" msgstr "%(target)s을(를) 찾을 수 없음" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "연합 사용자 특성을 ID 값에 맵핑할 수 없습니다. 추가 세부 사항은 사용된 맵핑 " "또는 디버그 로그를 확인하십시오." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "임시 사용자 ID를 설정하는 중에 사용자를 맵핑할 수 없습니다. 맵핑 규칙이 사용" "자 ID/이름을 지정해야 하거나 REMOTE_USER 환경 변수를 설정해야 합니다. " msgid "Could not validate the access token" msgstr "액세스 토큰을 유효성 검증할 수 없음" msgid "Credential belongs to another user" msgstr "신임 정보가 다른 사용자에 속함" msgid "Credential signature mismatch" msgstr "자격 증명 서명 불일치" #, python-format msgid "" "Direct import of auth plugin %(name)r is deprecated as of Liberty in favor " "of its entrypoint from %(namespace)r and may be removed in N." msgstr "" "Liberty에서 %(namespace)r의 입력점을 사용하기 위해 인증 플러그인 %(name)r의 " "직접 가져오기는 더 이상 사용되지 않으므로, N에서 제거될 수 있습니다." #, python-format msgid "" "Direct import of driver %(name)r is deprecated as of Liberty in favor of its " "entrypoint from %(namespace)r and may be removed in N." msgstr "" "Liberty에서 %(namespace)r의 입력점을 사용하기 위해 드라이버 %(name)r의 직접 " "가져오기는 더 이상 사용되지 않으므로, N에서 제거될 수 있습니다." 
msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "구성에서 'enable' 속성이 있는 엔티티의 사용 안함 설정을 무시합니다." #, python-format msgid "Domain (%s)" msgstr "도메인(%s)" #, python-format msgid "Domain cannot be named %s" msgstr "도메인 이름은 %s일 수 없음" #, python-format msgid "Domain cannot have ID %s" msgstr "도메인 ID가 %s일 수 없음" #, python-format msgid "Domain is disabled: %s" msgstr "도메인을 사용 안함: %s" msgid "Domain name cannot contain reserved characters." msgstr "도메인 이름에는 예약된 문자가 포함될 수 없습니다." msgid "Domain scoped token is not supported" msgstr "도메인 범위 지정 토큰은 지원되지 않음" msgid "Domain specific roles are not supported in the V8 role driver" msgstr "V8 역할 드라이버에서는 도메인 특정 역할이 지원되지 않음" #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "%(domain)s 도메인에 이미 정의된 구성이 있음 - 다음 파일을 무시하십시오. " "%(file)s." msgid "Duplicate Entry" msgstr "중복 항목" #, python-format msgid "Duplicate ID, %s." msgstr "중복 ID, %s." #, python-format msgid "Duplicate entry: %s" msgstr "중복된 항목: %s" #, python-format msgid "Duplicate name, %s." msgstr "중복 이름, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "중복된 원격 ID: %s" msgid "EC2 access key not found." msgstr "EC2 액세스 키를 찾을 수 없습니다." msgid "EC2 signature not supplied." msgstr "EC2 서명이 제공되지 않았습니다." msgid "" "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set." msgstr "--bootstrap-password 인수나 OS_BOOTSTRAP_PASSWORD를 설정해야 합니다." msgid "Enabled field must be a boolean" msgstr "사용으로 설정된 필드는 부울이어야 함" msgid "Enabled field should be a boolean" msgstr "사용으로 설정된 필드는 부울이어야 함" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "%(endpoint_id)s 엔드포인트가 %(project_id)s 프로젝트에 없음 " msgid "Endpoint Group Project Association not found" msgstr "엔드포인트 그룹 프로젝트 연관을 찾을 수 없음" msgid "Ensure configuration option idp_entity_id is set." msgstr "구성 옵션 idp_entity_id가 설정되어 있는지 확인하십시오." 
msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "구성 옵션 idp_sso_endpoint가 설정되어 있는지 확인하십시오." #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "%(domain)s 도메인에 대한 구성 파일을 구문 분석하는 중 오류 발생. 파일: " "%(file)s." #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "파일 %(path)s 여는 중 오류 발생: %(err)s" #, python-format msgid "Error while parsing line: '%(line)s': %(err)s" msgstr "행: '%(line)s' 구문 분석 중 오류 발생: %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "규칙 %(path)s 구문 분석 중 오류 발생: %(err)s" #, python-format msgid "Error while reading metadata file, %(reason)s" msgstr "메타데이터 파일을 읽는 중에 오류 발생, %(reason)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "SQL 드라이버를 사용하기 위해 도메인 %(domain)s을(를) 등록하는 시도가 초과되었" "습니다. 드라이버를 보유한 것으로 보이는 마지막 도메인은 %(last_domain)s입니" "다. 포기하는 중" #, python-format msgid "Expected dict or list: %s" msgstr "예상된 사전 또는 목록: %s" msgid "" "Expected signing certificates are not available on the server. Please check " "Keystone configuration." msgstr "" "예상 서명 인증서를 서버에서 사용할 수 없습니다. 키스톤 구성을 확인하십시오." #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s - the server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "%(target)s에 %(attribute)s이(가) 있어야 합니다- 서버의 형식이나 다른 항목이 " "올바르지 않기 때문에 서버가 요청을 준수할 수 없습니다. 클라이언트가 오류 상태" "로 간주됩니다." 
#, python-format msgid "Failed to start the %(name)s server" msgstr "%(name)s 서버를 시작하지 못함" msgid "Failed to validate token" msgstr "토큰을 유효성 검증하지 못했음" msgid "Federation token is expired" msgstr "연합 토큰이 만료됨" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "필드 \"remaining_uses\"가 %(value)s(으)로 설정되었으나 신뢰를 재위임하려면 설" "정하지 않아야 함" msgid "Found invalid token: scoped to both project and domain." msgstr "" "올바르지 않은 토큰이 있습니다. 프로젝트와 도메인 둘 다 범위에 포함됩니다." #, python-format msgid "Group %s not found in config" msgstr "구성에 그룹 %s을(를) 찾을 수 없음" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "도메인 특정 구성에 대해 %(group)s 그룹이 지원되지 않음" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "맵핑 %(mapping_id)s별로 리턴된 그룹 %(group_id)s을(를) 백엔드에서 찾지 못했습" "니다." #, python-format msgid "" "Group membership across backend boundaries is not allowed, group in question " "is %(group_id)s, user is %(user_id)s" msgstr "" "경계를 초월한 그룹 멤버십이 허용되지 않습니다. 관련 그룹은 %(group_id)s이고 " "사용자는 %(user_id)s입니다." #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "ID 속성 %(id_attr)s을(를) LDAP 오브젝트 %(dn)s에서 찾을 수 없음" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "ID 제공자 %(idp)s이(가) 사용 안함으로 설정됨" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "승인 ID에 수신 ID 제공자가 포함되지 않습니다." msgid "Invalid EC2 signature." msgstr "올바르지 않은 EC2 서명입니다." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "올바르지 않은 LDAP TLS 인증 옵션: %(option)s. 다음 중 하나 선택: %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "올바르지 않은 LDAP TLS_AVAIL 옵션: %s. TLS를 사용할 수 없음" #, python-format msgid "Invalid LDAP deref option: %(option)s. 
Choose one of: %(options)s" msgstr "" "올바르지 않은 LDAP deref 옵션: %(option)s. 다음 중 하나 선택: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "올바르지 않은 LDAP 범위: %(scope)s. 다음 중 하나를 선택: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "잘못된 TLS / LDAPS 결합." #, python-format msgid "Invalid audit info data type: %(data)s (%(type)s)" msgstr "올바르지 않은 감사 정보 데이터 유형: %(data)s (%(type)s)" msgid "Invalid blob in credential" msgstr "신임 정보에 올바르지 blob가 있음" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "구성 파일 이름에 올바르지 않은 도메인 이름 %(domain)s이(가) 있음: %(file)s - " "이 파일을 무시하십시오." #, python-format msgid "Invalid domain specific configuration: %(reason)s" msgstr "올바르지 않은 도메인 특정 구성: %(reason)s" #, python-format msgid "Invalid input for field '%(path)s'. The value is '%(value)s'." msgstr "'%(path)s' 필드에 올바르지 않은 입력입니다. 값은 '%(value)s'입니다." msgid "Invalid limit value" msgstr "올바르지 않은 한계 값" #, python-format msgid "" "Invalid mix of entities for policy association - only Endpoint, Service or " "Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s" msgstr "" "정책 연관에 대한 엔티티의 올바르지 않은 조합인 엔드포인트, 서비스 또는 리젼" "+서비스가 허용되었습니다. 요청은 엔드포인트: %(endpoint_id)s, 서비스: " "%(service_id)s, 리젼: %(region_id)s입니다." #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "올바르지 않은 규칙: %(identity_value)s. 'groups' 및 'domain' 키워드가 둘 다 " "지정되어야 합니다." msgid "Invalid signature" msgstr "올바르지 않은 서명" msgid "Invalid user / password" msgstr "올바르지 않은 사용자 / 비밀번호" msgid "Invalid username or TOTP passcode" msgstr "올바르지 않은 사용자 이름 또는 TOTP 비밀번호" msgid "Invalid username or password" msgstr "올바르지 않은 사용자 이름 또는 비밀번호" #, python-format msgid "KVS region %s is already configured. Cannot reconfigure." msgstr "KVS 리젼 %s이(가) 이미 구성되어 있습니다. 재구성할 수 없습니다." 
#, python-format msgid "Key Value Store not configured: %s" msgstr "키 값 저장소가 구성되지 않음: %s" #, python-format msgid "LDAP %s create" msgstr "LDAP %s 작성" #, python-format msgid "LDAP %s delete" msgstr "LDAP %s 삭제" #, python-format msgid "LDAP %s update" msgstr "LDAP %s 업데이트" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "변환 가능한 자원 id의 길이가 최대 허용 문자인 64보다 큼" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "맵핑 %(mapping_id)s의 로컬 섹션에서 존재하지 않는 원격 일치를 참조합니다(예: " "로컬 섹션의 {0})." #, python-format msgid "Lock Timeout occurred for key, %(target)s" msgstr "키 %(target)s에 대해 잠금 제한시간 초과가 발생함" #, python-format msgid "Lock key must match target key: %(lock)s != %(target)s" msgstr "잠금 키가 대상 키와 일치해야 함: %(lock)s != %(target)s" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "잘못된 형식의 엔드포인트 URL(%(endpoint)s). 세부사항은 오류 로그를 참조하십시" "오." msgid "Marker could not be found" msgstr "마커를 찾을 수 없음" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "%s 분기에 대한 최대 계층 깊이에 도달했습니다." #, python-format msgid "Maximum lock attempts on %s occurred." msgstr "%s에서 최대 잠금 시도가 발생했습니다." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "%(member)s 구성원은 이미 %(group)s 그룹의 구성원임" #, python-format msgid "Method not callable: %s" msgstr "메소드를 호출할 수 없음: %s" msgid "Missing entity ID from environment" msgstr "환경에서 엔티티 ID가 누락됨" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "재위임 시 \"redelegation_count\"를 수정할 수 없습니다. 이 매개변수는 생략하" "는 것이 좋습니다." msgid "Multiple domains are not supported" msgstr "여러 도메인이 지원되지 않음" msgid "Must be called within an active lock context." msgstr "활성 잠금 컨텍스트 내에서 호출되어야 합니다." 
msgid "Must specify either domain or project" msgstr "도메인 프로젝트 중 하나를 지정해야 함" msgid "Name field is required and cannot be empty" msgstr "이름 필드가 필요하며 비어 있을 수 없음" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "프로젝트 도메인 ID와 프로젝트 도메인 이름이 제공되지 않았습니다. " msgid "" "No Authorization headers found, cannot proceed with OAuth related calls, if " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "권한 부여 헤더를 찾을 수 없습니다. HTTPd 또는 Apache에서 실행 중인 경우 " "OAuth 관련 호출을 사용하여 계속 진행할 수 없습니다. WSGIPassAuthorization이 " "On으로 설정되어 있는지 확인하십시오." msgid "No authenticated user" msgstr "인증된 사용자가 없음" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "암호화 키를 찾을 수 없음: keystone-manage fernet_setup을 부트스트랩 1로 실행" "하십시오." msgid "No options specified" msgstr "지정된 옵션 없음" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "엔드포인트 %(endpoint_id)s과(와) 연관된 정책이 없습니다." #, python-format msgid "No remaining uses for trust: %(trust_id)s" msgstr "신뢰 %(trust_id)s에 대해 남아 있는 사용이 없음" msgid "No token in the request" msgstr "요청에 토큰이 없음" msgid "Non-default domain is not supported" msgstr "기본이 아닌 도메인은 지원되지 않음" msgid "One of the trust agents is disabled or deleted" msgstr "신뢰 에이전트 중 하나가 사용 안함으로 설정되었거나 삭제됨" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "%(option)s 옵션은 도메인 구성 요청 확인 중에 지정된 그룹이 없음을 발견함" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "도메인 특정 구성에 대해 %(group)s 그룹의 %(option)s 옵션이 지원되지않음" #, python-format msgid "Project (%s)" msgstr "프로젝트(%s)" #, python-format msgid "Project ID not found: %(t_id)s" msgstr "프로젝트 ID를 찾을 수 없음: %(t_id)s" msgid "Project field is required and cannot be empty." msgstr "프로젝트 필드는 필수이므로 비어 있어서는 안 됩니다. 
" #, python-format msgid "Project is disabled: %s" msgstr "프로젝트를 사용 안함: %s" msgid "Project name cannot contain reserved characters." msgstr "프로젝트 이름에 예약된 문자가 포함될 수 없습니다." msgid "Query string is not UTF-8 encoded" msgstr "조회 문자열이 UTF-8로 인코딩되어 있지 않음" #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "그룹 %(group)s에서 옵션 %(option)s의 기본값 읽기는 지원되지 않음" msgid "Redelegation allowed for delegated by trust only" msgstr "신뢰에서 위임한 경우에만 재위임 허용" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "%(redelegation_depth)d의 나머지 재위임 깊이가 허용 범위 [0..%(max_count)d]을" "(를) 벗어남" msgid "" "Remove admin_crud_extension from the paste pipeline, the admin_crud " "extension is now always available. Updatethe [pipeline:admin_api] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "붙여넣기 파이프라인에서 admin_crud_extension을 제거하십시오. admin_crud 확장" "은 이제 항상 사용할 수 있습니다. O 릴리스에서는 제거되므로 keystone-paste.ini" "에서 [pipeline:admin_api] 섹션을 적절하게 업데이트하십시오." msgid "" "Remove endpoint_filter_extension from the paste pipeline, the endpoint " "filter extension is now always available. Update the [pipeline:api_v3] " "section in keystone-paste.ini accordingly as it will be removed in the O " "release." msgstr "" "붙여넣기 파이프라인에서 endpoint_filter_extension을 제거하십시오. 엔드포인트 " "필터 확장은 이제 항상 사용할 수 있습니다. O 릴리스에서는 제거되므로 keystone-" "paste.ini에서 [pipeline:api_v3] 섹션을 적절하게 업데이트하십시오." msgid "" "Remove federation_extension from the paste pipeline, the federation " "extension is now always available. Update the [pipeline:api_v3] section in " "keystone-paste.ini accordingly, as it will be removed in the O release." msgstr "" "붙여넣기 파이프라인에서 federation_extension을 제거하십시오. 연합 확장은 이" "제 항상 사용할 수 있습니다. O 릴리스에서는 제거되므로 keystone-paste.ini에서 " "[pipeline:api_v3]섹션을 적절하게 업데이트하십시오." 
msgid "" "Remove oauth1_extension from the paste pipeline, the oauth1 extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "붙여넣기 파이프라인에서 oauth1_extension을 제거하십시오. oauth1 확장은 이제 " "항상 사용할 수 있습니다. O 릴리스에서는 제거되므로 keystone-paste.ini에서 " "[pipeline:api_v3]섹션을 적절하게 업데이트하십시오." msgid "" "Remove revoke_extension from the paste pipeline, the revoke extension is now " "always available. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "붙여넣기 파이프라인에서 revoke_extension을 제거하십시오. 취소 확장은 이제 항" "상 사용할 수 있습니다. O 릴리스에서는 제거되므로 keystone-paste.ini에서 " "[pipeline:api_v3]섹션을 적절하게 업데이트하십시오." msgid "" "Remove simple_cert from the paste pipeline, the PKI and PKIz token providers " "are now deprecated and simple_cert was only used insupport of these token " "providers. Update the [pipeline:api_v3] section in keystone-paste.ini " "accordingly, as it will be removed in the O release." msgstr "" "붙여넣기 파이프라인에서 simple_cert를 제거하십시오. PKI 및 PKIz 토큰 제공자" "는 이제 더 이상 사용되지 않으며 simple_cert는 이러한 토큰 제공자를 지원하는 " "데만 사용됩니다. O 릴리스에서는 제거되므로 keystone-paste.ini에서 [pipeline:" "api_v3]섹션을 적절하게 업데이트하십시오." msgid "" "Remove user_crud_extension from the paste pipeline, the user_crud extension " "is now always available. Updatethe [pipeline:public_api] section in keystone-" "paste.ini accordingly, as it will be removed in the O release." msgstr "" "붙여넣기 파이프라인에서 user_crud_extension을 제거하십시오. user_crud 확장은 " "이제 항상 사용할 수 있습니다. O 릴리스에서는 제거되므로 keystone-paste.ini에" "서 [pipeline:public_api] 섹션을 적절하게 업데이트하십시오." msgid "Request Token does not have an authorizing user id" msgstr "요청 토큰에 인증하는 사용자 ID가 없음" #, python-format msgid "" "Request attribute %(attribute)s must be less than or equal to %(size)i. The " "server could not comply with the request because the attribute size is " "invalid (too large). The client is assumed to be in error." 
msgstr "" "요청 속성 %(attribute)s이(가) %(size)i 이하여야 합니다. 속성 크기가 올바르지 " "않기 때문에(너무 큼) 서버가 요청을 준수할 수 없습니다. 클라이언트가 오류 상태" "로 간주됩니다." msgid "Request must have an origin query parameter" msgstr "요청에는 원본 조회 매개변수가 있어야 함" msgid "Request token is expired" msgstr "요청 토큰이 만료됨" msgid "Request token not found" msgstr "요청 토큰을 찾을 수 없음" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "요청된 만기 시간이 재위임된 신뢰에서 제공할 수 있는 시간보다 큼" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "%(requested_count)d의 요청된 재위임 깊이가 허용되는 %(max_count)d보다 깊음" msgid "" "Running keystone via eventlet is deprecated as of Kilo in favor of running " "in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will " "be removed in the \"M\"-Release." msgstr "" "eventlet을 통한 키스톤 실행은 WSGI 서버 실행의 플레이버에 있는 Kilo부터 더 " "이상 사용되지 않습니다(예: mod_wsgi). eventlet 아래의 키스톤에 대한 지원은 " "\"M\"-릴리스에서 제거됩니다." msgid "Scoping to both domain and project is not allowed" msgstr "도메인과 프로젝트에 대한 범위 지정이 허용되지 않음" msgid "Scoping to both domain and trust is not allowed" msgstr "도메인과 신뢰에 대한 범위 지정이 허용되지 않음" msgid "Scoping to both project and trust is not allowed" msgstr "프로젝트와 신뢰에 대한 범위 지정이 허용되지 않음" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "서비스 제공자 %(sp)s이(가) 사용 안함으로 설정됨" msgid "Some of requested roles are not in redelegated trust" msgstr "요청된 일부 역할이 재위임된 신뢰에 없음" msgid "Specify a domain or project, not both" msgstr "도메인 또는 프로젝트 중 하나 지정" msgid "Specify a user or group, not both" msgstr "사용자 또는 그룹 중 하나 지정" msgid "Specify one of domain or project" msgstr "도메인 또는 프로젝트 중 하나 지정" msgid "Specify one of user or group" msgstr "사용자 또는 그룹 중 하나 지정" #, python-format msgid "" "String length exceeded.The length of string '%(string)s' exceeded the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "문자열 길이 제한을 초과합니다. '%(string)s' 문자열 길이가 열의 한도 " "%(type)s(CHAR(%(length)d))을(를) 초과합니다." 
msgid "Tenant name cannot contain reserved characters." msgstr "테넌트 이름에 예약된 문자가 포함될 수 없습니다." #, python-format msgid "" "The %s extension has been moved into keystone core and as such its " "migrations are maintained by the main keystone database control. Use the " "command: keystone-manage db_sync" msgstr "" "%s 확장이 keystone 코어에 이동되었으므로 기본 keystone 데이터베이스 제어에서 " "마이그레이션을 유지 관리합니다. keystone-manage db_sync 명령을 사용하십시오." msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "'expires_at'은 지금보다 이전이어서는 안 됩니다. 형식이 잘못되었거나 올바르지 " "않기 때문에 서버가 요청을 준수할 수 없습니다. 클라이언트는 오류 상태로 간주됩" "니다." msgid "The --all option cannot be used with the --domain-name option" msgstr "--all 옵션은 --domain-name 옵션과 함께 사용할 수 없습니다." #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "키스톤 구성 파일 %(config_file)s을(를) 찾을 수 없습니다." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "키스톤 도메인 특정 구성에 하나 이상의 SQL 드라이버가 지정됨(하나만 허용됨): " "%(source)s." msgid "The action you have requested has not been implemented." msgstr "요청한 조치가 구현되지 않았습니다." msgid "The authenticated user should match the trustor." msgstr "인증된 사용자는 trustor와 일치해야 합니다." msgid "" "The certificates you requested are not available. It is likely that this " "server does not use PKI tokens otherwise this is the result of " "misconfiguration." msgstr "" "요청한 인증서를 사용할 수 없습니다. 서버가 PKI 토큰을 사용하지 않거나 잘못된 " "구성의 결과로 인해 발생했을 수 있습니다." msgid "The configured token provider does not support bind authentication." msgstr "구성된 토큰 제공자가 바인드 인증을 지원하지 않습니다. " msgid "The creation of projects acting as domains is not allowed in v2." msgstr "도메인 역할을 수행하는 프로젝트 생성은 v2에서 허용되지 않습니다. " #, python-format msgid "" "The password length must be less than or equal to %(size)i. 
The server could " "not comply with the request because the password is invalid." msgstr "" "비밀번호 길이는 %(size)i 이하여야 합니다. 비밀번호가 올바르지 않아 서버가 요" "청을 준수할 수 없습니다." msgid "The request you have made requires authentication." msgstr "요청에 인증이 필요합니다." msgid "The resource could not be found." msgstr "자원을 찾을 수 없습니다. " msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "취소 호출은 domain_id와 project_id가 둘 다 있으면 안됩니다.키스톤 서버에서 이" "는 버그입니다. 현재 요청이 중단됩니다." msgid "The service you have requested is no longer available on this server." msgstr "요청한 서비스를 더 이상 이 서버에서 사용할 수 없습니다." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "지정된 상위 리젼 %(parent_region_id)s에서 순환 리젼 계층을 작성합니다." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "구성에 지정된 %(group)s 그룹의 값은 옵션의 사전이어야 함" msgid "There should not be any non-oauth parameters" msgstr "non-oauth 매개변수가 없어야 함" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "인식되는 Fernet 페이로드 버전이 아님: %s" #, python-format msgid "This is not a recognized Fernet token %s" msgstr "인식되는 Fernet 토큰 %s이(가) 아님" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "시간소인이 예상된 형식이 아닙니다. 잘못 구성되었거나 올바르지 않으므로 서버" "가 요청을 준수할 수 없습니다. 클라이언트가 오류 상태로 간주됩니다." #, python-format msgid "" "To get a more detailed information on this error, re-run this command for " "the specific domain, i.e.: keystone-manage domain_config_upload --domain-" "name %s" msgstr "" "이 오류에 대한 자세한 정보를 보려면 특정 도메인에 대해 이 명령을 다시 실행하" "십시오. 
예: keystone-manage domain_config_upload --domain-name %s" msgid "Token belongs to another user" msgstr "토큰이 다른 사용자에 속함" msgid "Token does not belong to specified tenant." msgstr "토큰이 지정된 테넌트에 속하지 않습니다." msgid "Token version is unrecognizable or unsupported." msgstr "토큰 버전이 인식되지 않거나 지원되지 않습니다. " msgid "Trustee has no delegated roles." msgstr "Trustee에 위임된 역할이 없습니다. " msgid "Trustor is disabled." msgstr "Trustor를 사용하지 않습니다. " #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "구성에서 그룹만 지정되도록 %(group)s 그룹을 업데이트하려고 합니다. " #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "%(group)s 그룹에서 %(option)s 옵션을 업데이트하려고 했지만 제공된 구성에 " "%(option_other)s 옵션이 대신 포함되어 있습니다." #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "구성에서 옵션만 지정되도록 %(group)s 그룹에서 %(option)s 옵션을 업데이트하려" "고 합니다." msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "키스톤 데이터베이스를 액세스할 수 없습니다. 데이터베이스가 제대로 구성되어 있" "는지 확인하십시오. " #, python-format msgid "Unable to consume trust %(trust_id)s, unable to acquire lock." msgstr "%(trust_id)s 신뢰를 이용할 수 없어서 잠금을 획득할 수 없습니다." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "리젼 %(region_id)s 또는 하위 리젼에 연관된 엔드포인트가 있어 삭제할 수 없습니" "다." msgid "Unable to downgrade schema" msgstr "스키마를 다운그레이드할 수 없음" #, python-format msgid "Unable to find valid groups while using mapping %(mapping_id)s" msgstr "%(mapping_id)s 맵핑을 사용하는 중에 올바른 그룹을 찾을 수 없음 " #, python-format msgid "Unable to locate domain config directory: %s" msgstr "%s: 도메인 설정 디렉토리를 찾을 수 없습니다." 
#, python-format msgid "Unable to lookup user %s" msgstr "%s 사용자를 검색할 수 없음" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "ID 속성 %(attribute)s에 서로 충돌하는 %(new)s 및 %(old)s 값이 있으므로 이 ID " "속성을 조정할 수 없음" #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed, or this is the result of misconfiguration. Reason " "%(reason)s" msgstr "" "SAML 어설션에 서명할 수 없습니다. 이 서버에 xmlsec1이 설치되지 않았거나 잘못 " "구성된 결과입니다. 이유%(reason)s" msgid "Unable to sign token." msgstr "토큰을 부호화할 수 없습니다." #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "예상치 못한 지정 유형 발생, %s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s" msgstr "" "grant 속성의 예상치 못한 조합 - 사용자: %(user_id)s, 그룹: %(group_id)s, 프로" "젝트: %(project_id)s, 도메인: %(domain_id)s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "JSON 홈 응답에 대해 예상치 못한 상태가 요청됨. %s" msgid "Unknown Target" msgstr "알 수 없는 대상" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "--domain-name으로 알 수 없는 도메인 '%(name)s'을(를) 지정했음" #, python-format msgid "Unknown token version %s" msgstr "알 수 없는 토큰 버전 %s" #, python-format msgid "Unregistered dependency: %(name)s for %(targets)s" msgstr "등록되지 않은 종속성: %(targets)s의 %(name)s" msgid "Update of `domain_id` is not allowed." msgstr "`domain_id` 업데이트는 허용되지 않습니다." msgid "Update of `is_domain` is not allowed." msgstr "`is_domain`의 업데이트는 허용되지 않습니다. " msgid "Update of `parent_id` is not allowed." msgstr "`parent_id` 업데이트가 허용되지 않습니다." msgid "Update of domain_id is only allowed for root projects." msgstr "domain_id의 업데이트는 루트 프로젝트에만 허용됩니다." msgid "Update of domain_id of projects acting as domains is not allowed." msgstr "도메인 역할을 하는 프로젝트의 domain_id는 업데이트할 수 없습니다." 
msgid "Use a project scoped token when attempting to create a SAML assertion" msgstr "SAML 어설션을 작성할 때 프로젝트 범위 지정 토큰 사용" msgid "" "Use of the identity driver config to automatically configure the same " "assignment driver has been deprecated, in the \"O\" release, the assignment " "driver will need to be expicitly configured if different than the default " "(SQL)." msgstr "" "ID 드라이버 구성을 사용하여 동일한 할당 드라이버를 자동으로 구성하는 기능은 " "더 이상 사용되지 않습니다. \"O\" 릴리스에서는 기본값(SQL)과 다른 경우 할당 드" "라이버를 명시적으로 구성해야 합니다." #, python-format msgid "User %(u_id)s is unauthorized for tenant %(t_id)s" msgstr "사용자 %(u_id)s이(는) 테넌트 %(t_id)s에 대한 권한이 없습니다. " #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "" "%(user_id)s 사용자는 %(domain_id)s 도메인에 대한 액세스 권한이 없습니다. " #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "" "%(user_id)s 사용자는 %(project_id)s 프로젝트에 대한 액세스 권한이 없습니다. " #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "%(user_id)s 사용자는 이미 %(group_id)s 그룹의 구성원임" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "'%(group_id)s' 그룹에 '%(user_id)s' 사용자가 없음" msgid "User IDs do not match" msgstr "사용자 ID가 일치하지 않음" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "사용자 ID, 도메인 ID가 포함된 사용자 이름 또는 도메인 이름이 포함된 사용자 이" "름이 누락되어 사용자 인증을 빌드할 수 없습니다. " #, python-format msgid "User is disabled: %s" msgstr "사용자를 사용 안함: %s" msgid "User is not a member of the requested project" msgstr "사용자가 요청한 프로젝트의 구성원이 아님" msgid "User is not a trustee." msgstr "사용자는 trustee가 아닙니다." msgid "User not found" msgstr "사용자를 찾을 수 없음" msgid "User not valid for tenant." msgstr "테넌트의 사용자가 올바르지 않습니다." 
msgid "User roles not supported: tenant_id required" msgstr "사용자 역할이 지원되지 않음: tenant_id 필요" #, python-format msgid "User type %s not supported" msgstr "사용자 유형 %s이(가) 지원되지 않음" msgid "You are not authorized to perform the requested action." msgstr "요청한 조치를 수행할 권한이 없습니다." #, python-format msgid "You are not authorized to perform the requested action: %(action)s" msgstr "요청한 조치(%(action)s)를 수행할 권한이 없습니다." msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "관리자 토큰을 사용하여 자원을 생성하려 했습니다. 이 토큰이 도메인에 없으므" "로, 이 자원이 속할 도메인을 명시적으로 포함시켜야 합니다." msgid "`key_mangler` functions must be callable." msgstr "`key_mangler` 기능을 호출할 수 있어야 합니다." msgid "`key_mangler` option must be a function reference" msgstr "`key_mangler` 옵션은 기능 참조여야 함" msgid "any options" msgstr "옵션" msgid "auth_type is not Negotiate" msgstr "auth_type이 Negotiate가 아님" msgid "authorizing user does not have role required" msgstr "인증하는 사용자에게 필요한 역할이 없음" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "사용 안함으로 설정된 프로젝트가 포함된 분기에 프로젝트를 작성할 수 없습니다. " "%s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "도메인 역할을 하는 사용 설정된 프로젝트를 삭제할 수 없습니다. 프로젝트 %s을" "(를) 먼저 사용하지 않게 설정하십시오." #, python-format msgid "group %(group)s" msgstr "%(group)s 그룹" msgid "" "idp_contact_type must be one of: [technical, other, support, administrative " "or billing." msgstr "" "idp_contact_type은 [기술, 기타, 지원, 관리 또는 비용 청구 중 하나여야 합니다." 
#, python-format msgid "invalid date format %s" msgstr "올바르지 않은 날짜 형식 %s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "이름이 같은 두 프로젝트가 도메인 역할을 수행할 수 없음: %s" #, python-format msgid "" "it is not permitted to have two projects within a domain with the same " "name : %s" msgstr "한 도메인에 이름이 같은 두 프로젝트가 있을 수 없음: %s" msgid "only root projects are allowed to act as domains." msgstr "루트 프로젝트만 도메인 역할을 수행할 수 있습니다." #, python-format msgid "option %(option)s in group %(group)s" msgstr "%(group)s 그룹의 %(option)s 옵션" msgid "provided consumer key does not match stored consumer key" msgstr "제공된 이용자 키가 저장된 이용자 키와 일치하지 않음" msgid "provided request key does not match stored request key" msgstr "제공된 요청 키가 저장된 요청 키와 일치하지 않음" msgid "provided verifier does not match stored verifier" msgstr "제공된 확인자가 저장된 확인자와 일치하지 않음 " msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses는 양의 정수 또는 널이어야 합니다." msgid "remaining_uses must not be set if redelegation is allowed" msgstr "재위임을 허용하는 경우 remaining_uses를 설정하지 않아야 함" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "%(group)s 그룹을 업데이트하도록 요청했지만 제공된 구성에 %(group_other)s 그룹" "이 대신 포함되어 있습니다." msgid "rescope a scoped token" msgstr "범위 지정된 토큰의 범위 재지정" #, python-format msgid "role %s is not defined" msgstr "역할 %s이(가) 정의되지 않음" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "include_subtree도 지정된 경우 scope.project.id를 지정해야 함" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s를 찾을 수 없으며, 이 디렉토리에 존재하지 않습니다." #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s를 찾을 수 없스며, 그런 파일이 없습니다." #, python-format msgid "token reference must be a KeystoneToken type, got: %s" msgstr "토큰 참조는 KeystoneToken 유형이어야 합니다. %s을(를) 가져왔습니다." 
msgid "" "update of domain_id is deprecated as of Mitaka and will be removed in O." msgstr "" "Mitaka에서 domain_id 업데이트는 더 이상 사용되지 않으므로, O에서 제거됩니다." #, python-format msgid "" "validated expected to find %(param_name)r in function signature for " "%(func_name)r." msgstr "" "%(func_name)r에 대한 함수 서명에서 %(param_name)r을(를) 찾기 위해 유효성 검증" "하고 예상했습니다. " keystone-9.0.0/keystone/locale/pl_PL/0000775000567000056710000000000012701407246020633 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/pl_PL/LC_MESSAGES/0000775000567000056710000000000012701407246022420 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po0000664000567000056710000000164712701407102027167 0ustar jenkinsjenkins00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: keystone 9.0.0.0b2.dev256\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n" "POT-Creation-Date: 2016-01-18 05:50+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2014-08-31 03:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: pl-PL\n" "Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 " "|| n%100>=20) ? 
1 : 2);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Polish (Poland)\n" #, python-format msgid "Unable to open template file %s" msgstr "Błąd podczas otwierania pliku %s" keystone-9.0.0/keystone/policy/0000775000567000056710000000000012701407246017665 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/policy/backends/0000775000567000056710000000000012701407246021437 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/policy/backends/__init__.py0000664000567000056710000000000012701407102023525 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/policy/backends/sql.py0000664000567000056710000000501412701407102022577 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import sql from keystone import exception from keystone.policy.backends import rules class PolicyModel(sql.ModelBase, sql.DictBase): __tablename__ = 'policy' attributes = ['id', 'blob', 'type'] id = sql.Column(sql.String(64), primary_key=True) blob = sql.Column(sql.JsonBlob(), nullable=False) type = sql.Column(sql.String(255), nullable=False) extra = sql.Column(sql.JsonBlob()) class Policy(rules.Policy): @sql.handle_conflicts(conflict_type='policy') def create_policy(self, policy_id, policy): with sql.session_for_write() as session: ref = PolicyModel.from_dict(policy) session.add(ref) return ref.to_dict() def list_policies(self): with sql.session_for_read() as session: refs = session.query(PolicyModel).all() return [ref.to_dict() for ref in refs] def _get_policy(self, session, policy_id): """Private method to get a policy model object (NOT a dictionary).""" ref = session.query(PolicyModel).get(policy_id) if not ref: raise exception.PolicyNotFound(policy_id=policy_id) return ref def get_policy(self, policy_id): with sql.session_for_read() as session: return self._get_policy(session, policy_id).to_dict() @sql.handle_conflicts(conflict_type='policy') def update_policy(self, policy_id, policy): with sql.session_for_write() as session: ref = self._get_policy(session, policy_id) old_dict = ref.to_dict() old_dict.update(policy) new_policy = PolicyModel.from_dict(old_dict) ref.blob = new_policy.blob ref.type = new_policy.type ref.extra = new_policy.extra return ref.to_dict() def delete_policy(self, policy_id): with sql.session_for_write() as session: ref = self._get_policy(session, policy_id) session.delete(ref) keystone-9.0.0/keystone/policy/backends/rules.py0000664000567000056710000000525612701407102023142 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy engine for keystone""" from oslo_config import cfg from oslo_log import log from oslo_policy import policy as common_policy from keystone import exception from keystone import policy CONF = cfg.CONF LOG = log.getLogger(__name__) _ENFORCER = None def reset(): global _ENFORCER _ENFORCER = None def init(): global _ENFORCER if not _ENFORCER: _ENFORCER = common_policy.Enforcer(CONF) def enforce(credentials, action, target, do_raise=True): """Verifies that the action is valid on the target in this context. :param credentials: user credentials :param action: string representing the action to be checked, which should be colon separated for clarity. :param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. {'project_id': object.project_id} :raises keystone.exception.Forbidden: If verification fails. Actions should be colon separated for clarity. 
For example: * identity:list_users """ init() # Add the exception arguments if asked to do a raise extra = {} if do_raise: extra.update(exc=exception.ForbiddenAction, action=action, do_raise=do_raise) return _ENFORCER.enforce(action, target, credentials, **extra) class Policy(policy.PolicyDriverV8): def enforce(self, credentials, action, target): LOG.debug('enforce %(action)s: %(credentials)s', { 'action': action, 'credentials': credentials}) enforce(credentials, action, target) def create_policy(self, policy_id, policy): raise exception.NotImplemented() def list_policies(self): raise exception.NotImplemented() def get_policy(self, policy_id): raise exception.NotImplemented() def update_policy(self, policy_id, policy): raise exception.NotImplemented() def delete_policy(self, policy_id): raise exception.NotImplemented() keystone-9.0.0/keystone/policy/schema.py0000664000567000056710000000171212701407102021467 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
_policy_properties = { 'blob': { 'type': 'string' }, 'type': { 'type': 'string', 'maxLength': 255 } } policy_create = { 'type': 'object', 'properties': _policy_properties, 'required': ['blob', 'type'], 'additionalProperties': True } policy_update = { 'type': 'object', 'properties': _policy_properties, 'minProperties': 1, 'additionalProperties': True } keystone-9.0.0/keystone/policy/__init__.py0000664000567000056710000000124512701407102021767 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.policy import controllers # noqa from keystone.policy.core import * # noqa keystone-9.0.0/keystone/policy/core.py0000664000567000056710000001054112701407102021157 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Main entry point into the Policy service.""" import abc from oslo_config import cfg import six from keystone.common import dependency from keystone.common import manager from keystone import exception from keystone import notifications CONF = cfg.CONF @dependency.provider('policy_api') class Manager(manager.Manager): """Default pivot point for the Policy backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.policy' _POLICY = 'policy' def __init__(self): super(Manager, self).__init__(CONF.policy.driver) def create_policy(self, policy_id, policy, initiator=None): ref = self.driver.create_policy(policy_id, policy) notifications.Audit.created(self._POLICY, policy_id, initiator) return ref def get_policy(self, policy_id): try: return self.driver.get_policy(policy_id) except exception.NotFound: raise exception.PolicyNotFound(policy_id=policy_id) def update_policy(self, policy_id, policy, initiator=None): if 'id' in policy and policy_id != policy['id']: raise exception.ValidationError('Cannot change policy ID') try: ref = self.driver.update_policy(policy_id, policy) except exception.NotFound: raise exception.PolicyNotFound(policy_id=policy_id) notifications.Audit.updated(self._POLICY, policy_id, initiator) return ref @manager.response_truncated def list_policies(self, hints=None): # NOTE(henry-nash): Since the advantage of filtering or list limiting # of policies at the driver level is minimal, we leave this to the # caller. 
return self.driver.list_policies() def delete_policy(self, policy_id, initiator=None): try: ret = self.driver.delete_policy(policy_id) except exception.NotFound: raise exception.PolicyNotFound(policy_id=policy_id) notifications.Audit.deleted(self._POLICY, policy_id, initiator) return ret @six.add_metaclass(abc.ABCMeta) class PolicyDriverV8(object): def _get_list_limit(self): return CONF.policy.list_limit or CONF.list_limit @abc.abstractmethod def enforce(self, context, credentials, action, target): """Verify that a user is authorized to perform action. For more information on a full implementation of this see: `keystone.policy.backends.rules.Policy.enforce` """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_policy(self, policy_id, policy): """Store a policy blob. :raises keystone.exception.Conflict: If a duplicate policy exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_policies(self): """List all policies.""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_policy(self, policy_id): """Retrieve a specific policy blob. :raises keystone.exception.PolicyNotFound: If the policy doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_policy(self, policy_id, policy): """Update a policy blob. :raises keystone.exception.PolicyNotFound: If the policy doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_policy(self, policy_id): """Remove a policy blob. :raises keystone.exception.PolicyNotFound: If the policy doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover Driver = manager.create_legacy_driver(PolicyDriverV8) keystone-9.0.0/keystone/policy/controllers.py0000664000567000056710000000437312701407102022603 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import controller from keystone.common import dependency from keystone.common import validation from keystone import notifications from keystone.policy import schema @dependency.requires('policy_api') class PolicyV3(controller.V3Controller): collection_name = 'policies' member_name = 'policy' @controller.protected() @validation.validated(schema.policy_create, 'policy') def create_policy(self, context, policy): ref = self._assign_unique_id(self._normalize_dict(policy)) initiator = notifications._get_request_audit_info(context) ref = self.policy_api.create_policy(ref['id'], ref, initiator) return PolicyV3.wrap_member(context, ref) @controller.filterprotected('type') def list_policies(self, context, filters): hints = PolicyV3.build_driver_hints(context, filters) refs = self.policy_api.list_policies(hints=hints) return PolicyV3.wrap_collection(context, refs, hints=hints) @controller.protected() def get_policy(self, context, policy_id): ref = self.policy_api.get_policy(policy_id) return PolicyV3.wrap_member(context, ref) @controller.protected() @validation.validated(schema.policy_update, 'policy') def update_policy(self, context, policy_id, 
policy): initiator = notifications._get_request_audit_info(context) ref = self.policy_api.update_policy(policy_id, policy, initiator) return PolicyV3.wrap_member(context, ref) @controller.protected() def delete_policy(self, context, policy_id): initiator = notifications._get_request_audit_info(context) return self.policy_api.delete_policy(policy_id, initiator) keystone-9.0.0/keystone/policy/routers.py0000664000567000056710000000173312701407102021735 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import router from keystone.common import wsgi from keystone.policy import controllers class Routers(wsgi.RoutersBase): def append_v3_routers(self, mapper, routers): policy_controller = controllers.PolicyV3() routers.append(router.Router(policy_controller, 'policies', 'policy', resource_descriptions=self.v3_resources)) keystone-9.0.0/keystone/server/0000775000567000056710000000000012701407246017674 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/server/__init__.py0000664000567000056710000000000012701407102021762 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/server/wsgi.py0000664000567000056710000000347412701407102021216 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from oslo_config import cfg
import oslo_i18n


# NOTE(dstanek): i18n.enable_lazy() must be called before
# keystone.i18n._() is called to ensure it has the desired lazy lookup
# behavior. This includes cases, like keystone.exceptions, where
# keystone.i18n._() is called at import time.
oslo_i18n.enable_lazy()


from keystone.common import config
from keystone.common import environment
from keystone.server import common
from keystone.version import service as keystone_service


CONF = cfg.CONF


def initialize_application(name, post_log_configured_function=lambda: None):
    """Build and return the WSGI application for the named paste pipeline.

    :param name: paste pipeline name, 'admin' or 'main'
    :param post_log_configured_function: hook invoked after logging is
        configured but before the backends are loaded
    :returns: a WSGI application object
    """
    common.configure()

    # Log the options used when starting if we're in debug mode...
    if CONF.debug:
        CONF.log_opt_values(logging.getLogger(CONF.prog), logging.DEBUG)

    environment.use_stdlib()

    post_log_configured_function()

    def loadapp():
        # Deferred so setup_backends() controls when the paste pipeline is
        # actually instantiated (backends must exist first).
        return keystone_service.loadapp(
            'config:%s' % config.find_paste_config(), name)

    _unused, application = common.setup_backends(
        startup_application_fn=loadapp)
    return application


def initialize_admin_application():
    # mod_wsgi entry point for the admin API pipeline.
    return initialize_application('admin')


def initialize_public_application():
    # mod_wsgi entry point for the public (main) API pipeline.
    return initialize_application('main')
keystone-9.0.0/keystone/server/common.py0000664000567000056710000000316012701407102021525 0ustar jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log

from keystone.common import config
from keystone.common import dependency
from keystone.common import sql
from keystone.i18n import _LW
from keystone.server import backends


CONF = cfg.CONF
LOG = log.getLogger(__name__)


def configure(version=None, config_files=None,
              pre_setup_logging_fn=lambda: None):
    """Parse configuration and initialize SQL and logging.

    :param version: version string passed through to oslo.config
    :param config_files: explicit config files, or None for the defaults
    :param pre_setup_logging_fn: hook run after CONF is populated but
        before logging is configured
    """
    config.configure()
    sql.initialize()
    config.set_config_defaults()

    CONF(project='keystone', version=version,
         default_config_files=config_files)

    pre_setup_logging_fn()
    config.setup_logging()

    # Warn operators: insecure_debug leaks details into API responses.
    if CONF.insecure_debug:
        LOG.warning(_LW(
            'insecure_debug is enabled so responses may include sensitive '
            'information.'))


def setup_backends(load_extra_backends_fn=lambda: {},
                   startup_application_fn=lambda: None):
    """Load the backend managers, then build the application.

    :returns: tuple of (drivers dict, result of startup_application_fn)
    """
    drivers = backends.load_backends()
    drivers.update(load_extra_backends_fn())
    res = startup_application_fn()
    # Resolve any @dependency.requires declarations registered during load.
    drivers.update(dependency.resolve_future_dependencies())
    return drivers, res
keystone-9.0.0/keystone/server/eventlet.py0000664000567000056710000001265512701407102022074 0ustar jenkinsjenkins00000000000000
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import os
import socket

from oslo_concurrency import processutils
from oslo_config import cfg
import oslo_i18n
from oslo_service import service
from oslo_service import systemd
import pbr.version


# NOTE(dstanek): i18n.enable_lazy() must be called before
# keystone.i18n._() is called to ensure it has the desired lazy lookup
# behavior. This includes cases, like keystone.exceptions, where
# keystone.i18n._() is called at import time.
oslo_i18n.enable_lazy()


from keystone.common import config
from keystone.common import environment
from keystone.common import utils
from keystone.i18n import _
from keystone.server import common
from keystone.version import service as keystone_service


CONF = cfg.CONF


class ServerWrapper(object):
    """Wraps a Server with some launching info & capabilities."""

    def __init__(self, server, workers):
        self.server = server
        self.workers = workers

    def launch_with(self, launcher):
        """Bind the server's socket and hand it to the given launcher."""
        self.server.listen()
        if self.workers > 1:
            # Use multi-process launcher
            launcher.launch_service(self.server, self.workers)
        else:
            # Use single process launcher
            launcher.launch_service(self.server)


def create_server(conf, name, host, port, workers):
    """Build an eventlet server for one paste pipeline.

    :returns: tuple of (name, ServerWrapper)
    """
    app = keystone_service.loadapp('config:%s' % conf, name)
    server = environment.Server(app, host=host, port=port,
                                keepalive=CONF.eventlet_server.tcp_keepalive,
                                keepidle=CONF.eventlet_server.tcp_keepidle)
    if CONF.eventlet_server_ssl.enable:
        server.set_ssl(CONF.eventlet_server_ssl.certfile,
                       CONF.eventlet_server_ssl.keyfile,
                       CONF.eventlet_server_ssl.ca_certs,
                       CONF.eventlet_server_ssl.cert_required)
    return name, ServerWrapper(server, workers)


def serve(*servers):
    """Launch every (name, ServerWrapper) pair and block until shutdown."""
    logging.warning(_('Running keystone via eventlet is deprecated as of Kilo '
                      'in favor of running in a WSGI server (e.g. mod_wsgi). '
                      'Support for keystone under eventlet will be removed in '
                      'the "M"-Release.'))
    # A multi-process launcher is only needed if any server wants >1 worker.
    if max([server[1].workers for server in servers]) > 1:
        launcher = service.ProcessLauncher(CONF)
    else:
        launcher = service.ServiceLauncher(CONF)

    for name, server in servers:
        try:
            server.launch_with(launcher)
        except socket.error:
            logging.exception(_('Failed to start the %(name)s server') % {
                'name': name})
            raise

    # notify calling process we are ready to serve
    systemd.notify_once()

    for name, server in servers:
        launcher.wait()


def _get_workers(worker_type_config_opt):
    # Get the value from config, if the config value is None (not set), return
    # the number of cpus with a minimum of 2.
    worker_count = CONF.eventlet_server.get(worker_type_config_opt)
    if not worker_count:
        worker_count = max(2, processutils.get_worker_count())
    return worker_count


def configure_threading():
    """Decide whether eventlet monkey-patches the thread module."""
    monkeypatch_thread = not CONF.standard_threads
    pydev_debug_url = utils.setup_remote_pydev_debug()
    if pydev_debug_url:
        # in order to work around errors caused by monkey patching we have to
        # set the thread to False. An explanation is here:
        # http://lists.openstack.org/pipermail/openstack-dev/2012-August/
        # 000794.html
        monkeypatch_thread = False
    environment.use_eventlet(monkeypatch_thread)


def run(possible_topdir):
    """Entry point: configure keystone and serve admin + public endpoints.

    :param possible_topdir: directory checked for a development
        etc/keystone.conf before falling back to system config
    """
    dev_conf = os.path.join(possible_topdir,
                            'etc',
                            'keystone.conf')
    config_files = None
    if os.path.exists(dev_conf):
        config_files = [dev_conf]

    common.configure(
        version=pbr.version.VersionInfo('keystone').version_string(),
        config_files=config_files,
        pre_setup_logging_fn=configure_threading)

    paste_config = config.find_paste_config()

    def create_servers():
        # Deferred so the backends exist before the paste pipelines load.
        admin_worker_count = _get_workers('admin_workers')
        public_worker_count = _get_workers('public_workers')

        servers = []
        servers.append(create_server(paste_config,
                                     'admin',
                                     CONF.eventlet_server.admin_bind_host,
                                     CONF.eventlet_server.admin_port,
                                     admin_worker_count))
        servers.append(create_server(paste_config,
                                     'main',
                                     CONF.eventlet_server.public_bind_host,
                                     CONF.eventlet_server.public_port,
                                     public_worker_count))
        return servers

    _unused, servers = common.setup_backends(
        startup_application_fn=create_servers)
    serve(*servers)
keystone-9.0.0/keystone/server/backends.py0000664000567000056710000000563112701407102022014 0ustar jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone import assignment from keystone import auth from keystone import catalog from keystone.common import cache from keystone import credential from keystone import endpoint_policy from keystone import federation from keystone import identity from keystone import oauth1 from keystone import policy from keystone import resource from keystone import revoke from keystone import token from keystone import trust def load_backends(): # Configure and build the cache cache.configure_cache() cache.configure_cache(region=catalog.COMPUTED_CATALOG_REGION) cache.apply_invalidation_patch( region=catalog.COMPUTED_CATALOG_REGION, region_name=catalog.COMPUTED_CATALOG_REGION.name) cache.configure_cache(region=assignment.COMPUTED_ASSIGNMENTS_REGION) cache.apply_invalidation_patch( region=assignment.COMPUTED_ASSIGNMENTS_REGION, region_name=assignment.COMPUTED_ASSIGNMENTS_REGION.name) # Ensure that the identity driver is created before the assignment manager # and that the assignment driver is created before the resource manager. # The default resource driver depends on assignment, which in turn # depends on identity - hence we need to ensure the chain is available. # TODO(morganfainberg): In "O" release move _IDENTITY_API to be directly # instantiated in the DRIVERS dict once assignment driver being selected # based upon [identity]/driver is removed. 
_IDENTITY_API = identity.Manager() _ASSIGNMENT_API = assignment.Manager() DRIVERS = dict( assignment_api=_ASSIGNMENT_API, catalog_api=catalog.Manager(), credential_api=credential.Manager(), domain_config_api=resource.DomainConfigManager(), endpoint_policy_api=endpoint_policy.Manager(), federation_api=federation.Manager(), id_generator_api=identity.generator.Manager(), id_mapping_api=identity.MappingManager(), identity_api=_IDENTITY_API, shadow_users_api=identity.ShadowUsersManager(), oauth_api=oauth1.Manager(), policy_api=policy.Manager(), resource_api=resource.Manager(), revoke_api=revoke.Manager(), role_api=assignment.RoleManager(), token_api=token.persistence.Manager(), trust_api=trust.Manager(), token_provider_api=token.provider.Manager()) auth.controllers.load_auth_methods() return DRIVERS keystone-9.0.0/keystone/service.py0000664000567000056710000000360112701407102020367 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils import six from keystone.version import service def deprecated_to_version(f): """Specialized deprecation wrapper for service module. This wraps the standard deprecation wrapper and fills in the method names automatically. """ @six.wraps(f) def wrapper(*args, **kwargs): x = versionutils.deprecated( what='keystone.service.' + f.__name__ + '()', as_of=versionutils.deprecated.MITAKA, remove_in=+2, in_favor_of='keystone.version.service.' 
+ f.__name__ + '()') return x(f) return wrapper() @deprecated_to_version def public_app_factory(global_conf, **local_conf): return service.public_app_factory(global_conf, **local_conf) @deprecated_to_version def admin_app_factory(global_conf, **local_conf): return service.admin_app_factory(global_conf, **local_conf) @deprecated_to_version def public_version_app_factory(global_conf, **local_conf): return service.public_version_app_factory(global_conf, **local_conf) @deprecated_to_version def admin_version_app_factory(global_conf, **local_conf): return service.admin_app_factory(global_conf, **local_conf) @deprecated_to_version def v3_app_factory(global_conf, **local_conf): return service.v3_app_factory(global_conf, **local_conf) keystone-9.0.0/keystone/oauth1/0000775000567000056710000000000012701407246017567 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/oauth1/backends/0000775000567000056710000000000012701407246021341 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/oauth1/backends/__init__.py0000664000567000056710000000000012701407102023427 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/oauth1/backends/sql.py0000664000567000056710000002457312701407102022514 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime
import random as _random
import uuid

from oslo_serialization import jsonutils
from oslo_utils import timeutils

from keystone.common import sql
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
from keystone.oauth1 import core


# Cryptographically strong RNG; used for OAuth1 verifier generation.
random = _random.SystemRandom()


class Consumer(sql.ModelBase, sql.DictBase):
    """SQL model for an OAuth1 consumer (the delegated-to application)."""

    __tablename__ = 'consumer'
    attributes = ['id', 'description', 'secret']
    id = sql.Column(sql.String(64), primary_key=True, nullable=False)
    description = sql.Column(sql.String(64), nullable=True)
    secret = sql.Column(sql.String(64), nullable=False)
    extra = sql.Column(sql.JsonBlob(), nullable=False)


class RequestToken(sql.ModelBase, sql.DictBase):
    """SQL model for an unauthorized/authorized OAuth1 request token."""

    __tablename__ = 'request_token'
    attributes = ['id', 'request_secret',
                  'verifier', 'authorizing_user_id', 'requested_project_id',
                  'role_ids', 'consumer_id', 'expires_at']
    id = sql.Column(sql.String(64), primary_key=True, nullable=False)
    request_secret = sql.Column(sql.String(64), nullable=False)
    # verifier/authorizing_user_id/role_ids are NULL until the token is
    # authorized via authorize_request_token().
    verifier = sql.Column(sql.String(64), nullable=True)
    authorizing_user_id = sql.Column(sql.String(64), nullable=True)
    requested_project_id = sql.Column(sql.String(64), nullable=False)
    role_ids = sql.Column(sql.Text(), nullable=True)
    consumer_id = sql.Column(sql.String(64), sql.ForeignKey('consumer.id'),
                             nullable=False, index=True)
    expires_at = sql.Column(sql.String(64), nullable=True)

    @classmethod
    def from_dict(cls, user_dict):
        return cls(**user_dict)

    def to_dict(self):
        return dict(self.items())


class AccessToken(sql.ModelBase, sql.DictBase):
    """SQL model for an OAuth1 access token minted from a request token."""

    __tablename__ = 'access_token'
    attributes = ['id', 'access_secret', 'authorizing_user_id',
                  'project_id', 'role_ids', 'consumer_id',
                  'expires_at']
    id = sql.Column(sql.String(64), primary_key=True, nullable=False)
    access_secret = sql.Column(sql.String(64), nullable=False)
    authorizing_user_id = sql.Column(sql.String(64), nullable=False,
                                     index=True)
    project_id = sql.Column(sql.String(64), nullable=False)
    role_ids = sql.Column(sql.Text(), nullable=False)
    consumer_id = sql.Column(sql.String(64), sql.ForeignKey('consumer.id'),
                             nullable=False)
    expires_at = sql.Column(sql.String(64), nullable=True)

    @classmethod
    def from_dict(cls, user_dict):
        return cls(**user_dict)

    def to_dict(self):
        return dict(self.items())


class OAuth1(core.Oauth1DriverV8):
    """SQL implementation of the OAuth1 driver interface."""

    def _get_consumer(self, session, consumer_id):
        consumer_ref = session.query(Consumer).get(consumer_id)
        if consumer_ref is None:
            raise exception.NotFound(_('Consumer not found'))
        return consumer_ref

    def get_consumer_with_secret(self, consumer_id):
        with sql.session_for_read() as session:
            consumer_ref = self._get_consumer(session, consumer_id)
            return consumer_ref.to_dict()

    def get_consumer(self, consumer_id):
        # Public variant: the 'secret' field is stripped out.
        return core.filter_consumer(
            self.get_consumer_with_secret(consumer_id))

    def create_consumer(self, consumer_ref):
        with sql.session_for_write() as session:
            consumer = Consumer.from_dict(consumer_ref)
            session.add(consumer)
        return consumer.to_dict()

    def _delete_consumer(self, session, consumer_id):
        consumer_ref = self._get_consumer(session, consumer_id)
        session.delete(consumer_ref)

    def _delete_request_tokens(self, session, consumer_id):
        # Delete every request token issued to this consumer.
        q = session.query(RequestToken)
        req_tokens = q.filter_by(consumer_id=consumer_id)
        req_tokens_list = set([x.id for x in req_tokens])
        for token_id in req_tokens_list:
            token_ref = self._get_request_token(session, token_id)
            session.delete(token_ref)

    def _delete_access_tokens(self, session, consumer_id):
        # Delete every access token issued to this consumer.
        q = session.query(AccessToken)
        acc_tokens = q.filter_by(consumer_id=consumer_id)
        acc_tokens_list = set([x.id for x in acc_tokens])
        for token_id in acc_tokens_list:
            token_ref = self._get_access_token(session, token_id)
            session.delete(token_ref)

    def delete_consumer(self, consumer_id):
        # Tokens are removed first so the consumer FK is never dangling.
        with sql.session_for_write() as session:
            self._delete_request_tokens(session, consumer_id)
            self._delete_access_tokens(session, consumer_id)
            self._delete_consumer(session, consumer_id)

    def list_consumers(self):
        with sql.session_for_read() as session:
            cons = session.query(Consumer)
            return [core.filter_consumer(x.to_dict()) for x in cons]

    def update_consumer(self, consumer_id, consumer_ref):
        with sql.session_for_write() as session:
            consumer = self._get_consumer(session, consumer_id)
            old_consumer_dict = consumer.to_dict()
            old_consumer_dict.update(consumer_ref)
            new_consumer = Consumer.from_dict(old_consumer_dict)
            # Only description/extra are mutable; id and secret are not.
            consumer.description = new_consumer.description
            consumer.extra = new_consumer.extra
            return core.filter_consumer(consumer.to_dict())

    def create_request_token(self, consumer_id, requested_project,
                             request_token_duration):
        request_token_id = uuid.uuid4().hex
        request_token_secret = uuid.uuid4().hex
        expiry_date = None
        # A duration of 0/None yields a token that never expires.
        if request_token_duration:
            now = timeutils.utcnow()
            future = now + datetime.timedelta(seconds=request_token_duration)
            expiry_date = utils.isotime(future, subsecond=True)

        ref = {}
        ref['id'] = request_token_id
        ref['request_secret'] = request_token_secret
        ref['verifier'] = None
        ref['authorizing_user_id'] = None
        ref['requested_project_id'] = requested_project
        ref['role_ids'] = None
        ref['consumer_id'] = consumer_id
        ref['expires_at'] = expiry_date
        with sql.session_for_write() as session:
            token_ref = RequestToken.from_dict(ref)
            session.add(token_ref)
        return token_ref.to_dict()

    def _get_request_token(self, session, request_token_id):
        token_ref = session.query(RequestToken).get(request_token_id)
        if token_ref is None:
            raise exception.NotFound(_('Request token not found'))
        return token_ref

    def get_request_token(self, request_token_id):
        with sql.session_for_read() as session:
            token_ref = self._get_request_token(session, request_token_id)
            return token_ref.to_dict()

    def authorize_request_token(self, request_token_id, user_id,
                                role_ids):
        with sql.session_for_write() as session:
            token_ref = self._get_request_token(session, request_token_id)
            token_dict = token_ref.to_dict()
            token_dict['authorizing_user_id'] = user_id
            # 8-char verifier drawn from the unambiguous character set.
            token_dict['verifier'] = ''.join(random.sample(core.VERIFIER_CHARS,
                                                           8))
            token_dict['role_ids'] = jsonutils.dumps(role_ids)

            new_token = RequestToken.from_dict(token_dict)
            for attr in RequestToken.attributes:
                if (attr == 'authorizing_user_id' or attr == 'verifier'
                        or attr == 'role_ids'):
                    setattr(token_ref, attr, getattr(new_token, attr))

            return token_ref.to_dict()

    def create_access_token(self, request_id, access_token_duration):
        access_token_id = uuid.uuid4().hex
        access_token_secret = uuid.uuid4().hex
        with sql.session_for_write() as session:
            req_token_ref = self._get_request_token(session, request_id)
            token_dict = req_token_ref.to_dict()

            expiry_date = None
            if access_token_duration:
                now = timeutils.utcnow()
                future = (now +
                          datetime.timedelta(seconds=access_token_duration))
                expiry_date = utils.isotime(future, subsecond=True)

            # add Access Token
            ref = {}
            ref['id'] = access_token_id
            ref['access_secret'] = access_token_secret
            ref['authorizing_user_id'] = token_dict['authorizing_user_id']
            ref['project_id'] = token_dict['requested_project_id']
            ref['role_ids'] = token_dict['role_ids']
            ref['consumer_id'] = token_dict['consumer_id']
            ref['expires_at'] = expiry_date
            token_ref = AccessToken.from_dict(ref)
            session.add(token_ref)

            # remove request token, it's been used
            session.delete(req_token_ref)

        return token_ref.to_dict()

    def _get_access_token(self, session, access_token_id):
        token_ref = session.query(AccessToken).get(access_token_id)
        if token_ref is None:
            raise exception.NotFound(_('Access token not found'))
        return token_ref

    def get_access_token(self, access_token_id):
        with sql.session_for_read() as session:
            token_ref = self._get_access_token(session, access_token_id)
            return token_ref.to_dict()

    def list_access_tokens(self, user_id):
        with sql.session_for_read() as session:
            q = session.query(AccessToken)
            user_auths = q.filter_by(authorizing_user_id=user_id)
            return [core.filter_token(x.to_dict()) for x in user_auths]

    def delete_access_token(self, user_id, access_token_id):
        with sql.session_for_write() as session:
            token_ref = self._get_access_token(session, access_token_id)
            token_dict = token_ref.to_dict()
            # Only the user who authorized the token may revoke it.
            if token_dict['authorizing_user_id'] != user_id:
                raise exception.Unauthorized(_('User IDs do not match'))

            session.delete(token_ref)
keystone-9.0.0/keystone/oauth1/schema.py0000664000567000056710000000202212701407102021374 0ustar jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from keystone.common import validation
from keystone.common.validation import parameter_types


# JSON-schema fragments for consumer create/update request bodies.
_consumer_properties = {
    'description': validation.nullable(parameter_types.description)
}

consumer_create = {
    'type': 'object',
    'properties': _consumer_properties,
    'additionalProperties': True
}

# Updates may never set 'secret' directly; at least one property required.
consumer_update = {
    'type': 'object',
    'properties': _consumer_properties,
    'not': {
        'required': ['secret']
    },
    'minProperties': 1,
    'additionalProperties': True
}
keystone-9.0.0/keystone/oauth1/__init__.py0000664000567000056710000000116512701407102021672 0ustar jenkinsjenkins00000000000000
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.oauth1.core import *  # noqa
keystone-9.0.0/keystone/oauth1/core.py0000664000567000056710000002702412701407102021065 0ustar jenkinsjenkins00000000000000
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Main entry point into the OAuth1 service."""

from __future__ import absolute_import

import abc
import string
import uuid

import oauthlib.common
from oauthlib import oauth1
from oslo_config import cfg
from oslo_log import log
import six

from keystone.common import dependency
from keystone.common import extension
from keystone.common import manager
from keystone import exception
from keystone.i18n import _LE
from keystone import notifications


# Re-exported oauthlib names so the rest of keystone only imports this module.
RequestValidator = oauth1.RequestValidator
Client = oauth1.Client
AccessTokenEndpoint = oauth1.AccessTokenEndpoint
ResourceEndpoint = oauth1.ResourceEndpoint
AuthorizationEndpoint = oauth1.AuthorizationEndpoint
SIG_HMAC = oauth1.SIGNATURE_HMAC
RequestTokenEndpoint = oauth1.RequestTokenEndpoint
oRequest = oauthlib.common.Request
# The characters used to generate verifiers are limited to alphanumerical
# values for ease of manual entry. Commonly confused characters are omitted.
VERIFIER_CHARS = string.ascii_letters + string.digits
CONFUSED_CHARS = 'jiIl1oO0'
VERIFIER_CHARS = ''.join(c for c in VERIFIER_CHARS if c not in CONFUSED_CHARS)


class Token(object):
    # Minimal token holder (key/secret plus optional verifier) used when
    # driving the oauthlib endpoints.
    def __init__(self, key, secret):
        self.key = key
        self.secret = secret
        self.verifier = None

    def set_verifier(self, verifier):
        self.verifier = verifier


CONF = cfg.CONF
LOG = log.getLogger(__name__)


def token_generator(*args, **kwargs):
    # Tokens are opaque 32-char uuid hex strings; args are ignored.
    return uuid.uuid4().hex


EXTENSION_DATA = {
    'name': 'OpenStack OAUTH1 API',
    'namespace': 'http://docs.openstack.org/identity/api/ext/'
                 'OS-OAUTH1/v1.0',
    'alias': 'OS-OAUTH1',
    'updated': '2013-07-07T12:00:0-00:00',
    'description': 'OpenStack OAuth 1.0a Delegated Auth Mechanism.',
    'links': [
        {
            'rel': 'describedby',
            'type': 'text/html',
            'href': 'http://specs.openstack.org/openstack/keystone-specs/api/'
                    'v3/identity-api-v3-os-oauth1-ext.html',
        }
    ]}
extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)


def filter_consumer(consumer_ref):
    """Filter out private items in a consumer dict.

    'secret' is never returned.

    :returns: consumer_ref
    """
    if consumer_ref:
        consumer_ref = consumer_ref.copy()
        consumer_ref.pop('secret', None)
    return consumer_ref


def filter_token(access_token_ref):
    """Filter out private items in an access token dict.

    'access_secret' is never returned.

    :returns: access_token_ref
    """
    if access_token_ref:
        access_token_ref = access_token_ref.copy()
        access_token_ref.pop('access_secret', None)
    return access_token_ref


def get_oauth_headers(headers):
    """Extract the oauth_* parameters from an Authorization header.

    :raises: OAuthHeadersMissingError when no Authorization header exists
    :returns: dict of oauth parameter name -> value
    """
    parameters = {}

    # The incoming headers variable is your usual heading from context
    # In an OAuth signed req, where the oauth variables are in the header,
    # they with the key 'Authorization'.

    if headers and 'Authorization' in headers:
        # A typical value for Authorization is seen below
        # 'OAuth realm="", oauth_body_hash="2jm%3D", oauth_nonce="14475435"
        # along with other oauth variables, the 'OAuth ' part is trimmed
        # to split the rest of the headers.

        auth_header = headers['Authorization']
        params = oauth1.rfc5849.utils.parse_authorization_header(auth_header)
        parameters.update(dict(params))
        return parameters
    else:
        msg = _LE('Cannot retrieve Authorization headers')
        LOG.error(msg)
        raise exception.OAuthHeadersMissingError()


def extract_non_oauth_params(query_string):
    # Return only the non-oauth query parameters (oauth_* keys dropped).
    params = oauthlib.common.extract_params(query_string)
    return {k: v for k, v in params if not k.startswith('oauth_')}


@dependency.provider('oauth_api')
class Manager(manager.Manager):
    """Default pivot point for the OAuth1 backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """

    driver_namespace = 'keystone.oauth1'

    # CADF resource type names used for audit notifications.
    _ACCESS_TOKEN = "OS-OAUTH1:access_token"
    _REQUEST_TOKEN = "OS-OAUTH1:request_token"
    _CONSUMER = "OS-OAUTH1:consumer"

    def __init__(self):
        super(Manager, self).__init__(CONF.oauth1.driver)

    def create_consumer(self, consumer_ref, initiator=None):
        # The secret is generated here, never supplied by the caller.
        consumer_ref = consumer_ref.copy()
        consumer_ref['secret'] = uuid.uuid4().hex
        ret = self.driver.create_consumer(consumer_ref)
        notifications.Audit.created(self._CONSUMER, ret['id'], initiator)
        return ret

    def update_consumer(self, consumer_id, consumer_ref, initiator=None):
        ret = self.driver.update_consumer(consumer_id, consumer_ref)
        notifications.Audit.updated(self._CONSUMER, consumer_id, initiator)
        return ret

    def delete_consumer(self, consumer_id, initiator=None):
        ret = self.driver.delete_consumer(consumer_id)
        notifications.Audit.deleted(self._CONSUMER, consumer_id, initiator)
        return ret

    def create_access_token(self, request_id, access_token_duration,
                            initiator=None):
        ret = self.driver.create_access_token(request_id,
                                              access_token_duration)
        notifications.Audit.created(self._ACCESS_TOKEN, ret['id'], initiator)
        return ret

    def delete_access_token(self, user_id, access_token_id, initiator=None):
        ret = self.driver.delete_access_token(user_id, access_token_id)
        notifications.Audit.deleted(self._ACCESS_TOKEN, access_token_id,
                                    initiator)
        return ret

    def create_request_token(self, consumer_id, requested_project,
                             request_token_duration, initiator=None):
        ret = self.driver.create_request_token(
            consumer_id, requested_project, request_token_duration)
        notifications.Audit.created(self._REQUEST_TOKEN, ret['id'],
                                    initiator)
        return ret


@six.add_metaclass(abc.ABCMeta)
class Oauth1DriverV8(object):
    """Interface description for an OAuth1 driver."""

    @abc.abstractmethod
    def create_consumer(self, consumer_ref):
        """Create consumer.

        :param consumer_ref: consumer ref with consumer name
        :type consumer_ref: dict
        :returns: consumer_ref

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def update_consumer(self, consumer_id, consumer_ref):
        """Update consumer.

        :param consumer_id: id of consumer to update
        :type consumer_id: string
        :param consumer_ref: new consumer ref with consumer name
        :type consumer_ref: dict
        :returns: consumer_ref

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_consumers(self):
        """List consumers.

        :returns: list of consumers

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_consumer(self, consumer_id):
        """Get consumer, returns the consumer id (key) and description.

        :param consumer_id: id of consumer to get
        :type consumer_id: string
        :returns: consumer_ref

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_consumer_with_secret(self, consumer_id):
        """Like get_consumer(), but also returns consumer secret.

        Returned dictionary consumer_ref includes consumer secret.
        Secrets should only be shared upon consumer creation; the
        consumer secret is required to verify incoming OAuth requests.

        :param consumer_id: id of consumer to get
        :type consumer_id: string
        :returns: consumer_ref containing consumer secret

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_consumer(self, consumer_id):
        """Delete consumer.

        :param consumer_id: id of consumer to get
        :type consumer_id: string
        :returns: None.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_access_tokens(self, user_id):
        """List access tokens.

        :param user_id: search for access tokens authorized by given user id
        :type user_id: string
        :returns: list of access tokens the user has authorized

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_access_token(self, user_id, access_token_id):
        """Delete access token.

        :param user_id: authorizing user id
        :type user_id: string
        :param access_token_id: access token to delete
        :type access_token_id: string
        :returns: None

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def create_request_token(self, consumer_id, requested_project,
                             request_token_duration):
        """Create request token.

        :param consumer_id: the id of the consumer
        :type consumer_id: string
        :param requested_project: requested project id
        :type requested_project: string
        :param request_token_duration: duration of request token
        :type request_token_duration: string
        :returns: request_token_ref

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_request_token(self, request_token_id):
        """Get request token.

        :param request_token_id: the id of the request token
        :type request_token_id: string
        :returns: request_token_ref

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_access_token(self, access_token_id):
        """Get access token.

        :param access_token_id: the id of the access token
        :type access_token_id: string
        :returns: access_token_ref

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def authorize_request_token(self, request_token_id, user_id, role_ids):
        """Authorize request token.

        :param request_token_id: the id of the request token, to be authorized
        :type request_token_id: string
        :param user_id: the id of the authorizing user
        :type user_id: string
        :param role_ids: list of role ids to authorize
        :type role_ids: list
        :returns: verifier

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def create_access_token(self, request_id, access_token_duration):
        """Create access token.

        :param request_id: the id of the request token, to be deleted
        :type request_id: string
        :param access_token_duration: duration of an access token
        :type access_token_duration: string
        :returns: access_token_ref

        """
        raise exception.NotImplemented()  # pragma: no cover


# Legacy alias kept for out-of-tree drivers still subclassing `Driver`.
Driver = manager.create_legacy_driver(Oauth1DriverV8)
keystone-9.0.0/keystone/oauth1/validator.py0000664000567000056710000001455712701407102022131 0ustar jenkinsjenkins00000000000000
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oAuthlib request validator.""" import six from keystone.common import dependency from keystone import exception from keystone.oauth1 import core as oauth1 METHOD_NAME = 'oauth_validator' @dependency.requires('oauth_api') class OAuthValidator(oauth1.RequestValidator): # TODO(mhu) set as option probably? @property def enforce_ssl(self): return False @property def safe_characters(self): # oauth tokens are generated from a uuid hex value return set("abcdef0123456789") def _check_token(self, token): # generic token verification when they're obtained from a uuid hex return (set(token) <= self.safe_characters and len(token) == 32) def check_client_key(self, client_key): return self._check_token(client_key) def check_request_token(self, request_token): return self._check_token(request_token) def check_access_token(self, access_token): return self._check_token(access_token) def check_nonce(self, nonce): # Assuming length is not a concern return set(nonce) <= self.safe_characters def check_verifier(self, verifier): return (all(i in oauth1.VERIFIER_CHARS for i in verifier) and len(verifier) == 8) def get_client_secret(self, client_key, request): client = self.oauth_api.get_consumer_with_secret(client_key) return client['secret'] def get_request_token_secret(self, client_key, token, request): token_ref = self.oauth_api.get_request_token(token) return token_ref['request_secret'] def get_access_token_secret(self, client_key, token, request): access_token = self.oauth_api.get_access_token(token) return access_token['access_secret'] def get_default_realms(self, client_key, request): # realms weren't implemented with the previous library return [] def get_realms(self, token, request): return [] def get_redirect_uri(self, token, request): # OOB (out of band) is supposed to be the default value to use return 'oob' def get_rsa_key(self, client_key, request): # HMAC signing is used, so return a dummy value return '' def invalidate_request_token(self, client_key, request_token, 
request): # this method is invoked when an access token is generated out of a # request token, to make sure that request token cannot be consumed # anymore. This is done in the backend, so we do nothing here. pass def validate_client_key(self, client_key, request): try: return self.oauth_api.get_consumer(client_key) is not None except exception.NotFound: return False def validate_request_token(self, client_key, token, request): try: return self.oauth_api.get_request_token(token) is not None except exception.NotFound: return False def validate_access_token(self, client_key, token, request): try: return self.oauth_api.get_access_token(token) is not None except exception.NotFound: return False def validate_timestamp_and_nonce(self, client_key, timestamp, nonce, request, request_token=None, access_token=None): return True def validate_redirect_uri(self, client_key, redirect_uri, request): # we expect OOB, we don't really care return True def validate_requested_realms(self, client_key, realms, request): # realms are not used return True def validate_realms(self, client_key, token, request, uri=None, realms=None): return True def validate_verifier(self, client_key, token, verifier, request): try: req_token = self.oauth_api.get_request_token(token) return req_token['verifier'] == verifier except exception.NotFound: return False def verify_request_token(self, token, request): # there aren't strong expectations on the request token format return isinstance(token, six.string_types) def verify_realms(self, token, realms, request): return True # The following save_XXX methods are called to create tokens. I chose to # keep the original logic, but the comments below show how that could be # implemented. The real implementation logic is in the backend. 
def save_access_token(self, token, request): pass # token_duration = CONF.oauth1.request_token_duration # request_token_id = request.client_key # self.oauth_api.create_access_token(request_token_id, # token_duration, # token["oauth_token"], # token["oauth_token_secret"]) def save_request_token(self, token, request): pass # project_id = request.headers.get('Requested-Project-Id') # token_duration = CONF.oauth1.request_token_duration # self.oauth_api.create_request_token(request.client_key, # project_id, # token_duration, # token["oauth_token"], # token["oauth_token_secret"]) def save_verifier(self, token, verifier, request): # keep the old logic for this, as it is done in two steps and requires # information that the request validator has no access to pass keystone-9.0.0/keystone/oauth1/controllers.py0000664000567000056710000004131412701407102022501 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Extensions supporting OAuth1.""" from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import timeutils from keystone.common import controller from keystone.common import dependency from keystone.common import utils from keystone.common import validation from keystone.common import wsgi from keystone import exception from keystone.i18n import _ from keystone import notifications from keystone.oauth1 import core as oauth1 from keystone.oauth1 import schema from keystone.oauth1 import validator CONF = cfg.CONF def _emit_user_oauth_consumer_token_invalidate(payload): # This is a special case notification that expect the payload to be a dict # containing the user_id and the consumer_id. This is so that the token # provider can invalidate any tokens in the token persistence if # token persistence is enabled notifications.Audit.internal( notifications.INVALIDATE_USER_OAUTH_CONSUMER_TOKENS, payload, ) @dependency.requires('oauth_api', 'token_provider_api') class ConsumerCrudV3(controller.V3Controller): collection_name = 'consumers' member_name = 'consumer' @classmethod def base_url(cls, context, path=None): """Construct a path and pass it to V3Controller.base_url method.""" # NOTE(stevemar): Overriding path to /OS-OAUTH1/consumers so that # V3Controller.base_url handles setting the self link correctly. 
path = '/OS-OAUTH1/' + cls.collection_name return controller.V3Controller.base_url(context, path=path) @controller.protected() @validation.validated(schema.consumer_create, 'consumer') def create_consumer(self, context, consumer): ref = self._assign_unique_id(self._normalize_dict(consumer)) initiator = notifications._get_request_audit_info(context) consumer_ref = self.oauth_api.create_consumer(ref, initiator) return ConsumerCrudV3.wrap_member(context, consumer_ref) @controller.protected() @validation.validated(schema.consumer_update, 'consumer') def update_consumer(self, context, consumer_id, consumer): self._require_matching_id(consumer_id, consumer) ref = self._normalize_dict(consumer) initiator = notifications._get_request_audit_info(context) ref = self.oauth_api.update_consumer(consumer_id, ref, initiator) return ConsumerCrudV3.wrap_member(context, ref) @controller.protected() def list_consumers(self, context): ref = self.oauth_api.list_consumers() return ConsumerCrudV3.wrap_collection(context, ref) @controller.protected() def get_consumer(self, context, consumer_id): ref = self.oauth_api.get_consumer(consumer_id) return ConsumerCrudV3.wrap_member(context, ref) @controller.protected() def delete_consumer(self, context, consumer_id): user_token_ref = utils.get_token_ref(context) payload = {'user_id': user_token_ref.user_id, 'consumer_id': consumer_id} _emit_user_oauth_consumer_token_invalidate(payload) initiator = notifications._get_request_audit_info(context) self.oauth_api.delete_consumer(consumer_id, initiator) @dependency.requires('oauth_api') class AccessTokenCrudV3(controller.V3Controller): collection_name = 'access_tokens' member_name = 'access_token' @classmethod def _add_self_referential_link(cls, context, ref): # NOTE(lwolf): overriding method to add proper path to self link ref.setdefault('links', {}) path = '/users/%(user_id)s/OS-OAUTH1/access_tokens' % { 'user_id': cls._get_user_id(ref) } ref['links']['self'] = cls.base_url(context, path) + '/' + 
ref['id'] @controller.protected() def get_access_token(self, context, user_id, access_token_id): access_token = self.oauth_api.get_access_token(access_token_id) if access_token['authorizing_user_id'] != user_id: raise exception.NotFound() access_token = self._format_token_entity(context, access_token) return AccessTokenCrudV3.wrap_member(context, access_token) @controller.protected() def list_access_tokens(self, context, user_id): auth_context = context.get('environment', {}).get('KEYSTONE_AUTH_CONTEXT', {}) if auth_context.get('is_delegated_auth'): raise exception.Forbidden( _('Cannot list request tokens' ' with a token issued via delegation.')) refs = self.oauth_api.list_access_tokens(user_id) formatted_refs = ([self._format_token_entity(context, x) for x in refs]) return AccessTokenCrudV3.wrap_collection(context, formatted_refs) @controller.protected() def delete_access_token(self, context, user_id, access_token_id): access_token = self.oauth_api.get_access_token(access_token_id) consumer_id = access_token['consumer_id'] payload = {'user_id': user_id, 'consumer_id': consumer_id} _emit_user_oauth_consumer_token_invalidate(payload) initiator = notifications._get_request_audit_info(context) return self.oauth_api.delete_access_token( user_id, access_token_id, initiator) @staticmethod def _get_user_id(entity): return entity.get('authorizing_user_id', '') def _format_token_entity(self, context, entity): formatted_entity = entity.copy() access_token_id = formatted_entity['id'] user_id = self._get_user_id(formatted_entity) if 'role_ids' in entity: formatted_entity.pop('role_ids') if 'access_secret' in entity: formatted_entity.pop('access_secret') url = ('/users/%(user_id)s/OS-OAUTH1/access_tokens/%(access_token_id)s' '/roles' % {'user_id': user_id, 'access_token_id': access_token_id}) formatted_entity.setdefault('links', {}) formatted_entity['links']['roles'] = (self.base_url(context, url)) return formatted_entity @dependency.requires('oauth_api', 'role_api') class 
AccessTokenRolesV3(controller.V3Controller): collection_name = 'roles' member_name = 'role' @controller.protected() def list_access_token_roles(self, context, user_id, access_token_id): access_token = self.oauth_api.get_access_token(access_token_id) if access_token['authorizing_user_id'] != user_id: raise exception.NotFound() authed_role_ids = access_token['role_ids'] authed_role_ids = jsonutils.loads(authed_role_ids) refs = ([self._format_role_entity(x) for x in authed_role_ids]) return AccessTokenRolesV3.wrap_collection(context, refs) @controller.protected() def get_access_token_role(self, context, user_id, access_token_id, role_id): access_token = self.oauth_api.get_access_token(access_token_id) if access_token['authorizing_user_id'] != user_id: raise exception.Unauthorized(_('User IDs do not match')) authed_role_ids = access_token['role_ids'] authed_role_ids = jsonutils.loads(authed_role_ids) for authed_role_id in authed_role_ids: if authed_role_id == role_id: role = self._format_role_entity(role_id) return AccessTokenRolesV3.wrap_member(context, role) raise exception.RoleNotFound(role_id=role_id) def _format_role_entity(self, role_id): role = self.role_api.get_role(role_id) formatted_entity = role.copy() if 'description' in role: formatted_entity.pop('description') if 'enabled' in role: formatted_entity.pop('enabled') return formatted_entity @dependency.requires('assignment_api', 'oauth_api', 'resource_api', 'token_provider_api') class OAuthControllerV3(controller.V3Controller): collection_name = 'not_used' member_name = 'not_used' def create_request_token(self, context): headers = context['headers'] oauth_headers = oauth1.get_oauth_headers(headers) consumer_id = oauth_headers.get('oauth_consumer_key') requested_project_id = headers.get('Requested-Project-Id') if not consumer_id: raise exception.ValidationError( attribute='oauth_consumer_key', target='request') if not requested_project_id: raise exception.ValidationError( attribute='requested_project_id', 
target='request') # NOTE(stevemar): Ensure consumer and requested project exist self.resource_api.get_project(requested_project_id) self.oauth_api.get_consumer(consumer_id) url = self.base_url(context, context['path']) req_headers = {'Requested-Project-Id': requested_project_id} req_headers.update(headers) request_verifier = oauth1.RequestTokenEndpoint( request_validator=validator.OAuthValidator(), token_generator=oauth1.token_generator) h, b, s = request_verifier.create_request_token_response( url, http_method='POST', body=context['query_string'], headers=req_headers) if (not b) or int(s) > 399: msg = _('Invalid signature') raise exception.Unauthorized(message=msg) request_token_duration = CONF.oauth1.request_token_duration initiator = notifications._get_request_audit_info(context) token_ref = self.oauth_api.create_request_token(consumer_id, requested_project_id, request_token_duration, initiator) result = ('oauth_token=%(key)s&oauth_token_secret=%(secret)s' % {'key': token_ref['id'], 'secret': token_ref['request_secret']}) if CONF.oauth1.request_token_duration: expiry_bit = '&oauth_expires_at=%s' % token_ref['expires_at'] result += expiry_bit headers = [('Content-Type', 'application/x-www-urlformencoded')] response = wsgi.render_response(result, status=(201, 'Created'), headers=headers) return response def create_access_token(self, context): headers = context['headers'] oauth_headers = oauth1.get_oauth_headers(headers) consumer_id = oauth_headers.get('oauth_consumer_key') request_token_id = oauth_headers.get('oauth_token') oauth_verifier = oauth_headers.get('oauth_verifier') if not consumer_id: raise exception.ValidationError( attribute='oauth_consumer_key', target='request') if not request_token_id: raise exception.ValidationError( attribute='oauth_token', target='request') if not oauth_verifier: raise exception.ValidationError( attribute='oauth_verifier', target='request') req_token = self.oauth_api.get_request_token( request_token_id) expires_at = 
req_token['expires_at'] if expires_at: now = timeutils.utcnow() expires = timeutils.normalize_time( timeutils.parse_isotime(expires_at)) if now > expires: raise exception.Unauthorized(_('Request token is expired')) url = self.base_url(context, context['path']) access_verifier = oauth1.AccessTokenEndpoint( request_validator=validator.OAuthValidator(), token_generator=oauth1.token_generator) h, b, s = access_verifier.create_access_token_response( url, http_method='POST', body=context['query_string'], headers=headers) params = oauth1.extract_non_oauth_params(b) if params: msg = _('There should not be any non-oauth parameters') raise exception.Unauthorized(message=msg) if req_token['consumer_id'] != consumer_id: msg = _('provided consumer key does not match stored consumer key') raise exception.Unauthorized(message=msg) if req_token['verifier'] != oauth_verifier: msg = _('provided verifier does not match stored verifier') raise exception.Unauthorized(message=msg) if req_token['id'] != request_token_id: msg = _('provided request key does not match stored request key') raise exception.Unauthorized(message=msg) if not req_token.get('authorizing_user_id'): msg = _('Request Token does not have an authorizing user id') raise exception.Unauthorized(message=msg) access_token_duration = CONF.oauth1.access_token_duration initiator = notifications._get_request_audit_info(context) token_ref = self.oauth_api.create_access_token(request_token_id, access_token_duration, initiator) result = ('oauth_token=%(key)s&oauth_token_secret=%(secret)s' % {'key': token_ref['id'], 'secret': token_ref['access_secret']}) if CONF.oauth1.access_token_duration: expiry_bit = '&oauth_expires_at=%s' % (token_ref['expires_at']) result += expiry_bit headers = [('Content-Type', 'application/x-www-urlformencoded')] response = wsgi.render_response(result, status=(201, 'Created'), headers=headers) return response @controller.protected() def authorize_request_token(self, context, request_token_id, roles): """An 
authenticated user is going to authorize a request token. As a security precaution, the requested roles must match those in the request token. Because this is in a CLI-only world at the moment, there is not another easy way to make sure the user knows which roles are being requested before authorizing. """ auth_context = context.get('environment', {}).get('KEYSTONE_AUTH_CONTEXT', {}) if auth_context.get('is_delegated_auth'): raise exception.Forbidden( _('Cannot authorize a request token' ' with a token issued via delegation.')) req_token = self.oauth_api.get_request_token(request_token_id) expires_at = req_token['expires_at'] if expires_at: now = timeutils.utcnow() expires = timeutils.normalize_time( timeutils.parse_isotime(expires_at)) if now > expires: raise exception.Unauthorized(_('Request token is expired')) # put the roles in a set for easy comparison authed_roles = set() for role in roles: authed_roles.add(role['id']) # verify the authorizing user has the roles user_token = utils.get_token_ref(context) user_id = user_token.user_id project_id = req_token['requested_project_id'] user_roles = self.assignment_api.get_roles_for_user_and_project( user_id, project_id) cred_set = set(user_roles) if not cred_set.issuperset(authed_roles): msg = _('authorizing user does not have role required') raise exception.Unauthorized(message=msg) # create list of just the id's for the backend role_ids = list(authed_roles) # verify the user has the project too req_project_id = req_token['requested_project_id'] user_projects = self.assignment_api.list_projects_for_user(user_id) for user_project in user_projects: if user_project['id'] == req_project_id: break else: msg = _("User is not a member of the requested project") raise exception.Unauthorized(message=msg) # finally authorize the token authed_token = self.oauth_api.authorize_request_token( request_token_id, user_id, role_ids) to_return = {'token': {'oauth_verifier': authed_token['verifier']}} return to_return 
keystone-9.0.0/keystone/oauth1/routers.py0000664000567000056710000001413612701407102021640 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from keystone.common import json_home from keystone.common import wsgi from keystone.oauth1 import controllers build_resource_relation = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-OAUTH1', extension_version='1.0') build_parameter_relation = functools.partial( json_home.build_v3_extension_parameter_relation, extension_name='OS-OAUTH1', extension_version='1.0') ACCESS_TOKEN_ID_PARAMETER_RELATION = build_parameter_relation( parameter_name='access_token_id') class Routers(wsgi.RoutersBase): """API Endpoints for the OAuth1 extension. The goal of this extension is to allow third-party service providers to acquire tokens with a limited subset of a user's roles for acting on behalf of that user. This is done using an oauth-similar flow and api. 
The API looks like:: # Basic admin-only consumer crud POST /OS-OAUTH1/consumers GET /OS-OAUTH1/consumers PATCH /OS-OAUTH1/consumers/{consumer_id} GET /OS-OAUTH1/consumers/{consumer_id} DELETE /OS-OAUTH1/consumers/{consumer_id} # User access token crud GET /users/{user_id}/OS-OAUTH1/access_tokens GET /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} GET /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles GET /users/{user_id}/OS-OAUTH1/access_tokens /{access_token_id}/roles/{role_id} DELETE /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} # OAuth interfaces POST /OS-OAUTH1/request_token # create a request token PUT /OS-OAUTH1/authorize # authorize a request token POST /OS-OAUTH1/access_token # create an access token """ def append_v3_routers(self, mapper, routers): consumer_controller = controllers.ConsumerCrudV3() access_token_controller = controllers.AccessTokenCrudV3() access_token_roles_controller = controllers.AccessTokenRolesV3() oauth_controller = controllers.OAuthControllerV3() # basic admin-only consumer crud self._add_resource( mapper, consumer_controller, path='/OS-OAUTH1/consumers', get_action='list_consumers', post_action='create_consumer', rel=build_resource_relation(resource_name='consumers')) self._add_resource( mapper, consumer_controller, path='/OS-OAUTH1/consumers/{consumer_id}', get_action='get_consumer', patch_action='update_consumer', delete_action='delete_consumer', rel=build_resource_relation(resource_name='consumer'), path_vars={ 'consumer_id': build_parameter_relation(parameter_name='consumer_id'), }) # user access token crud self._add_resource( mapper, access_token_controller, path='/users/{user_id}/OS-OAUTH1/access_tokens', get_action='list_access_tokens', rel=build_resource_relation(resource_name='user_access_tokens'), path_vars={ 'user_id': json_home.Parameters.USER_ID, }) self._add_resource( mapper, access_token_controller, path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}', 
get_action='get_access_token', delete_action='delete_access_token', rel=build_resource_relation(resource_name='user_access_token'), path_vars={ 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, 'user_id': json_home.Parameters.USER_ID, }) self._add_resource( mapper, access_token_roles_controller, path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/' 'roles', get_action='list_access_token_roles', rel=build_resource_relation( resource_name='user_access_token_roles'), path_vars={ 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, 'user_id': json_home.Parameters.USER_ID, }) self._add_resource( mapper, access_token_roles_controller, path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/' 'roles/{role_id}', get_action='get_access_token_role', rel=build_resource_relation( resource_name='user_access_token_role'), path_vars={ 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }) # oauth flow calls self._add_resource( mapper, oauth_controller, path='/OS-OAUTH1/request_token', post_action='create_request_token', rel=build_resource_relation(resource_name='request_tokens')) self._add_resource( mapper, oauth_controller, path='/OS-OAUTH1/access_token', post_action='create_access_token', rel=build_resource_relation(resource_name='access_tokens')) self._add_resource( mapper, oauth_controller, path='/OS-OAUTH1/authorize/{request_token_id}', path_vars={ 'request_token_id': build_parameter_relation(parameter_name='request_token_id') }, put_action='authorize_request_token', rel=build_resource_relation( resource_name='authorize_request_token')) keystone-9.0.0/keystone/exception.py0000664000567000056710000004353612701407102020740 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_utils import encodeutils import six from keystone.i18n import _, _LW CONF = cfg.CONF LOG = log.getLogger(__name__) # Tests use this to make exception message format errors fatal _FATAL_EXCEPTION_FORMAT_ERRORS = False def _format_with_unicode_kwargs(msg_format, kwargs): try: return msg_format % kwargs except UnicodeDecodeError: try: kwargs = {k: encodeutils.safe_decode(v) for k, v in kwargs.items()} except UnicodeDecodeError: # NOTE(jamielennox): This is the complete failure case # at least by showing the template we have some idea # of where the error is coming from return msg_format return msg_format % kwargs class Error(Exception): """Base error class. Child classes should define an HTTP status code, title, and a message_format. """ code = None title = None message_format = None def __init__(self, message=None, **kwargs): try: message = self._build_message(message, **kwargs) except KeyError: # if you see this warning in your logs, please raise a bug report if _FATAL_EXCEPTION_FORMAT_ERRORS: raise else: LOG.warning(_LW('missing exception kwargs (programmer error)')) message = self.message_format super(Error, self).__init__(message) def _build_message(self, message, **kwargs): """Builds and returns an exception message. 
:raises KeyError: given insufficient kwargs """ if message: return message return _format_with_unicode_kwargs(self.message_format, kwargs) class ValidationError(Error): message_format = _("Expecting to find %(attribute)s in %(target)s -" " the server could not comply with the request" " since it is either malformed or otherwise" " incorrect. The client is assumed to be in error.") code = 400 title = 'Bad Request' class URLValidationError(ValidationError): message_format = _("Cannot create an endpoint with an invalid URL:" " %(url)s") class SchemaValidationError(ValidationError): # NOTE(lbragstad): For whole OpenStack message consistency, this error # message has been written in a format consistent with WSME. message_format = _("%(detail)s") class ValidationTimeStampError(Error): message_format = _("Timestamp not in expected format." " The server could not comply with the request" " since it is either malformed or otherwise" " incorrect. The client is assumed to be in error.") code = 400 title = 'Bad Request' class ValidationExpirationError(Error): message_format = _("The 'expires_at' must not be before now." " The server could not comply with the request" " since it is either malformed or otherwise" " incorrect. The client is assumed to be in error.") code = 400 title = 'Bad Request' class StringLengthExceeded(ValidationError): message_format = _("String length exceeded.The length of" " string '%(string)s' exceeded the limit" " of column %(type)s(CHAR(%(length)d)).") class ValidationSizeError(Error): message_format = _("Request attribute %(attribute)s must be" " less than or equal to %(size)i. The server" " could not comply with the request because" " the attribute size is invalid (too large)." 
" The client is assumed to be in error.") code = 400 title = 'Bad Request' class CircularRegionHierarchyError(Error): message_format = _("The specified parent region %(parent_region_id)s " "would create a circular region hierarchy.") code = 400 title = 'Bad Request' class ForbiddenNotSecurity(Error): """When you want to return a 403 Forbidden response but not security. Use this for errors where the message is always safe to present to the user and won't give away extra information. """ code = 403 title = 'Forbidden' class PasswordVerificationError(ForbiddenNotSecurity): message_format = _("The password length must be less than or equal " "to %(size)i. The server could not comply with the " "request because the password is invalid.") class RegionDeletionError(ForbiddenNotSecurity): message_format = _("Unable to delete region %(region_id)s because it or " "its child regions have associated endpoints.") class PKITokenExpected(ForbiddenNotSecurity): message_format = _('The certificates you requested are not available. ' 'It is likely that this server does not use PKI tokens ' 'otherwise this is the result of misconfiguration.') class SecurityError(Error): """Security error exception. Avoids exposing details of security errors, unless in insecure_debug mode. """ amendment = _('(Disable insecure_debug mode to suppress these details.)') def _build_message(self, message, **kwargs): """Only returns detailed messages in insecure_debug mode.""" if message and CONF.insecure_debug: if isinstance(message, six.string_types): # Only do replacement if message is string. The message is # sometimes a different exception or bytes, which would raise # TypeError. 
message = _format_with_unicode_kwargs(message, kwargs) return _('%(message)s %(amendment)s') % { 'message': message, 'amendment': self.amendment} return _format_with_unicode_kwargs(self.message_format, kwargs) class Unauthorized(SecurityError): message_format = _("The request you have made requires authentication.") code = 401 title = 'Unauthorized' class AuthPluginException(Unauthorized): message_format = _("Authentication plugin error.") def __init__(self, *args, **kwargs): super(AuthPluginException, self).__init__(*args, **kwargs) self.authentication = {} class MissingGroups(Unauthorized): message_format = _("Unable to find valid groups while using " "mapping %(mapping_id)s") class AuthMethodNotSupported(AuthPluginException): message_format = _("Attempted to authenticate with an unsupported method.") def __init__(self, *args, **kwargs): super(AuthMethodNotSupported, self).__init__(*args, **kwargs) self.authentication = {'methods': CONF.auth.methods} class AdditionalAuthRequired(AuthPluginException): message_format = _("Additional authentications steps required.") def __init__(self, auth_response=None, **kwargs): super(AdditionalAuthRequired, self).__init__(message=None, **kwargs) self.authentication = auth_response class Forbidden(SecurityError): message_format = _("You are not authorized to perform the" " requested action.") code = 403 title = 'Forbidden' class ForbiddenAction(Forbidden): message_format = _("You are not authorized to perform the" " requested action: %(action)s") class ImmutableAttributeError(Forbidden): message_format = _("Could not change immutable attribute(s) " "'%(attributes)s' in target %(target)s") class CrossBackendNotAllowed(Forbidden): message_format = _("Group membership across backend boundaries is not " "allowed, group in question is %(group_id)s, " "user is %(user_id)s") class InvalidPolicyAssociation(Forbidden): message_format = _("Invalid mix of entities for policy association - " "only Endpoint, Service or Region+Service 
allowed. " "Request was - Endpoint: %(endpoint_id)s, " "Service: %(service_id)s, Region: %(region_id)s") class InvalidDomainConfig(Forbidden): message_format = _("Invalid domain specific configuration: %(reason)s") class NotFound(Error): message_format = _("Could not find: %(target)s") code = 404 title = 'Not Found' class EndpointNotFound(NotFound): message_format = _("Could not find endpoint: %(endpoint_id)s") class MetadataNotFound(NotFound): # NOTE (dolph): metadata is not a user-facing concept, # so this exception should not be exposed. message_format = _("An unhandled exception has occurred:" " Could not find metadata.") class PolicyNotFound(NotFound): message_format = _("Could not find policy: %(policy_id)s") class PolicyAssociationNotFound(NotFound): message_format = _("Could not find policy association") class RoleNotFound(NotFound): message_format = _("Could not find role: %(role_id)s") class ImpliedRoleNotFound(NotFound): message_format = _("%(prior_role_id)s does not imply %(implied_role_id)s") class InvalidImpliedRole(Forbidden): message_format = _("%(role_id)s cannot be an implied roles") class RoleAssignmentNotFound(NotFound): message_format = _("Could not find role assignment with role: " "%(role_id)s, user or group: %(actor_id)s, " "project or domain: %(target_id)s") class RegionNotFound(NotFound): message_format = _("Could not find region: %(region_id)s") class ServiceNotFound(NotFound): message_format = _("Could not find service: %(service_id)s") class DomainNotFound(NotFound): message_format = _("Could not find domain: %(domain_id)s") class ProjectNotFound(NotFound): message_format = _("Could not find project: %(project_id)s") class InvalidParentProject(NotFound): message_format = _("Cannot create project with parent: %(project_id)s") class TokenNotFound(NotFound): message_format = _("Could not find token: %(token_id)s") class UserNotFound(NotFound): message_format = _("Could not find user: %(user_id)s") class GroupNotFound(NotFound): 
message_format = _("Could not find group: %(group_id)s") class MappingNotFound(NotFound): message_format = _("Could not find mapping: %(mapping_id)s") class TrustNotFound(NotFound): message_format = _("Could not find trust: %(trust_id)s") class TrustUseLimitReached(Forbidden): message_format = _("No remaining uses for trust: %(trust_id)s") class CredentialNotFound(NotFound): message_format = _("Could not find credential: %(credential_id)s") class VersionNotFound(NotFound): message_format = _("Could not find version: %(version)s") class EndpointGroupNotFound(NotFound): message_format = _("Could not find Endpoint Group: %(endpoint_group_id)s") class IdentityProviderNotFound(NotFound): message_format = _("Could not find Identity Provider: %(idp_id)s") class ServiceProviderNotFound(NotFound): message_format = _("Could not find Service Provider: %(sp_id)s") class FederatedProtocolNotFound(NotFound): message_format = _("Could not find federated protocol %(protocol_id)s for" " Identity Provider: %(idp_id)s") class PublicIDNotFound(NotFound): # This is used internally and mapped to either User/GroupNotFound or, # Assertion before the exception leaves Keystone. message_format = "%(id)s" class DomainConfigNotFound(NotFound): message_format = _('Could not find %(group_or_option)s in domain ' 'configuration for domain %(domain_id)s') class ConfigRegistrationNotFound(Exception): # This is used internally between the domain config backend and the # manager, so should not escape to the client. If it did, it is a coding # error on our part, and would end up, appropriately, as a 500 error. pass class KeystoneConfigurationError(Exception): # This is an exception to be used in the case that Keystone config is # invalid and Keystone should not start. 
pass class Conflict(Error): message_format = _("Conflict occurred attempting to store %(type)s -" " %(details)s") code = 409 title = 'Conflict' class UnexpectedError(SecurityError): """Avoids exposing details of failures, unless in insecure_debug mode.""" message_format = _("An unexpected error prevented the server " "from fulfilling your request.") debug_message_format = _("An unexpected error prevented the server " "from fulfilling your request: %(exception)s") def _build_message(self, message, **kwargs): # Ensure that exception has a value to be extra defensive for # substitutions and make sure the exception doesn't raise an # exception. kwargs.setdefault('exception', '') return super(UnexpectedError, self)._build_message( message or self.debug_message_format, **kwargs) code = 500 title = 'Internal Server Error' class TrustConsumeMaximumAttempt(UnexpectedError): debug_message_format = _("Unable to consume trust %(trust_id)s, unable to " "acquire lock.") class CertificateFilesUnavailable(UnexpectedError): debug_message_format = _("Expected signing certificates are not available " "on the server. Please check Keystone " "configuration.") class MalformedEndpoint(UnexpectedError): debug_message_format = _("Malformed endpoint URL (%(endpoint)s)," " see ERROR log for details.") class MappedGroupNotFound(UnexpectedError): debug_message_format = _("Group %(group_id)s returned by mapping " "%(mapping_id)s was not found in the backend.") class MetadataFileError(UnexpectedError): debug_message_format = _("Error while reading metadata file, %(reason)s") class DirectMappingError(UnexpectedError): message_format = _("Local section in mapping %(mapping_id)s refers to a " "remote match that doesn't exist " "(e.g. 
{0} in a local section).") class AssignmentTypeCalculationError(UnexpectedError): debug_message_format = _( 'Unexpected combination of grant attributes - ' 'User: %(user_id)s, Group: %(group_id)s, Project: %(project_id)s, ' 'Domain: %(domain_id)s') class NotImplemented(Error): message_format = _("The action you have requested has not" " been implemented.") code = 501 title = 'Not Implemented' class Gone(Error): message_format = _("The service you have requested is no" " longer available on this server.") code = 410 title = 'Gone' class ConfigFileNotFound(UnexpectedError): debug_message_format = _("The Keystone configuration file %(config_file)s " "could not be found.") class KeysNotFound(UnexpectedError): debug_message_format = _('No encryption keys found; run keystone-manage ' 'fernet_setup to bootstrap one.') class MultipleSQLDriversInConfig(UnexpectedError): debug_message_format = _('The Keystone domain-specific configuration has ' 'specified more than one SQL driver (only one is ' 'permitted): %(source)s.') class MigrationNotProvided(Exception): def __init__(self, mod_name, path): super(MigrationNotProvided, self).__init__(_( "%(mod_name)s doesn't provide database migrations. The migration" " repository path at %(path)s doesn't exist or isn't a directory." ) % {'mod_name': mod_name, 'path': path}) class UnsupportedTokenVersionException(UnexpectedError): debug_message_format = _('Token version is unrecognizable or ' 'unsupported.') class SAMLSigningError(UnexpectedError): debug_message_format = _('Unable to sign SAML assertion. It is likely ' 'that this server does not have xmlsec1 ' 'installed, or this is the result of ' 'misconfiguration. 
Reason %(reason)s') class OAuthHeadersMissingError(UnexpectedError): debug_message_format = _('No Authorization headers found, cannot proceed ' 'with OAuth related calls, if running under ' 'HTTPd or Apache, ensure WSGIPassAuthorization ' 'is set to On.') class TokenlessAuthConfigError(ValidationError): message_format = _('Could not determine Identity Provider ID. The ' 'configuration option %(issuer_attribute)s ' 'was not found in the request environment.') class MigrationMovedFailure(RuntimeError): def __init__(self, extension): self.extension = extension msg = _("The %s extension has been moved into keystone core and as " "such its migrations are maintained by the main keystone " "database control. Use the command: keystone-manage " "db_sync") % self.extension super(MigrationMovedFailure, self).__init__(msg) class UnsupportedDriverVersion(UnexpectedError): debug_message_format = _('%(driver)s is not supported driver version') keystone-9.0.0/keystone/version/0000775000567000056710000000000012701407246020053 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/version/__init__.py0000664000567000056710000000000012701407102022141 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/version/service.py0000664000567000056710000001311512701407102022055 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import sys from oslo_config import cfg from oslo_log import log from paste import deploy import routes from keystone.assignment import routers as assignment_routers from keystone.auth import routers as auth_routers from keystone.catalog import routers as catalog_routers from keystone.common import wsgi from keystone.credential import routers as credential_routers from keystone.endpoint_policy import routers as endpoint_policy_routers from keystone.federation import routers as federation_routers from keystone.i18n import _LW from keystone.identity import routers as identity_routers from keystone.oauth1 import routers as oauth1_routers from keystone.policy import routers as policy_routers from keystone.resource import routers as resource_routers from keystone.revoke import routers as revoke_routers from keystone.token import _simple_cert as simple_cert_ext from keystone.token import routers as token_routers from keystone.trust import routers as trust_routers from keystone.v2_crud import admin_crud from keystone.v2_crud import user_crud from keystone.version import controllers from keystone.version import routers CONF = cfg.CONF LOG = log.getLogger(__name__) def loadapp(conf, name): # NOTE(blk-u): Save the application being loaded in the controllers module. # This is similar to how public_app_factory() and v3_app_factory() # register the version with the controllers module. 
controllers.latest_app = deploy.loadapp(conf, name=name) return controllers.latest_app def fail_gracefully(f): """Logs exceptions and aborts.""" @functools.wraps(f) def wrapper(*args, **kw): try: return f(*args, **kw) except Exception as e: LOG.debug(e, exc_info=True) # exception message is printed to all logs LOG.critical(e) sys.exit(1) return wrapper def warn_local_conf(f): @functools.wraps(f) def wrapper(*args, **local_conf): if local_conf: LOG.warning(_LW('\'local conf\' from PasteDeploy INI is being ' 'ignored.')) return f(*args, **local_conf) return wrapper @fail_gracefully @warn_local_conf def public_app_factory(global_conf, **local_conf): controllers.register_version('v2.0') return wsgi.ComposingRouter(routes.Mapper(), [assignment_routers.Public(), token_routers.Router(), user_crud.Router(), routers.VersionV2('public'), routers.Extension(False)]) @fail_gracefully @warn_local_conf def admin_app_factory(global_conf, **local_conf): controllers.register_version('v2.0') return wsgi.ComposingRouter(routes.Mapper(), [identity_routers.Admin(), assignment_routers.Admin(), token_routers.Router(), resource_routers.Admin(), admin_crud.Router(), routers.VersionV2('admin'), routers.Extension()]) @fail_gracefully @warn_local_conf def public_version_app_factory(global_conf, **local_conf): return wsgi.ComposingRouter(routes.Mapper(), [routers.Versions('public')]) @fail_gracefully @warn_local_conf def admin_version_app_factory(global_conf, **local_conf): return wsgi.ComposingRouter(routes.Mapper(), [routers.Versions('admin')]) @fail_gracefully @warn_local_conf def v3_app_factory(global_conf, **local_conf): controllers.register_version('v3') mapper = routes.Mapper() sub_routers = [] _routers = [] # NOTE(dstanek): Routers should be ordered by their frequency of use in # a live system. This is due to the routes implementation. The most # frequently used routers should appear first. 
all_api_routers = [auth_routers, assignment_routers, catalog_routers, credential_routers, identity_routers, policy_routers, resource_routers, revoke_routers, federation_routers, oauth1_routers, # TODO(morganfainberg): Remove the simple_cert router # when PKI and PKIZ tokens are removed. simple_cert_ext] if CONF.trust.enabled: all_api_routers.append(trust_routers) if CONF.endpoint_policy.enabled: all_api_routers.append(endpoint_policy_routers) for api_routers in all_api_routers: routers_instance = api_routers.Routers() _routers.append(routers_instance) routers_instance.append_v3_routers(mapper, sub_routers) # Add in the v3 version api sub_routers.append(routers.VersionV3('public', _routers)) return wsgi.ComposingRouter(mapper, sub_routers) keystone-9.0.0/keystone/version/controllers.py0000664000567000056710000001537612701407102022776 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils import webob from keystone.common import extension from keystone.common import json_home from keystone.common import wsgi from keystone import exception MEDIA_TYPE_JSON = 'application/vnd.openstack.identity-%s+json' _VERSIONS = [] # NOTE(blk-u): latest_app will be set by keystone.version.service.loadapp(). It # gets set to the application that was just loaded. In the case of keystone-all # loadapp() gets called twice, once for the public app and once for the admin # app. 
In the case of httpd/keystone, loadapp() gets called once for the public # app if this is the public instance or loadapp() gets called for the admin app # if it's the admin instance. # This is used to fetch the /v3 JSON Home response. The /v3 JSON Home response # is the same whether it's the admin or public service so either admin or # public works. latest_app = None def request_v3_json_home(new_prefix): if 'v3' not in _VERSIONS: # No V3 support, so return an empty JSON Home document. return {'resources': {}} req = webob.Request.blank( '/v3', headers={'Accept': 'application/json-home'}) v3_json_home_str = req.get_response(latest_app).body v3_json_home = jsonutils.loads(v3_json_home_str) json_home.translate_urls(v3_json_home, new_prefix) return v3_json_home class Extensions(wsgi.Application): """Base extensions controller to be extended by public and admin API's.""" # extend in subclass to specify the set of extensions @property def extensions(self): return None def get_extensions_info(self, context): return {'extensions': {'values': list(self.extensions.values())}} def get_extension_info(self, context, extension_alias): try: return {'extension': self.extensions[extension_alias]} except KeyError: raise exception.NotFound(target=extension_alias) class AdminExtensions(Extensions): @property def extensions(self): return extension.ADMIN_EXTENSIONS class PublicExtensions(Extensions): @property def extensions(self): return extension.PUBLIC_EXTENSIONS def register_version(version): _VERSIONS.append(version) class MimeTypes(object): JSON = 'application/json' JSON_HOME = 'application/json-home' def v3_mime_type_best_match(context): # accept_header is a WebOb MIMEAccept object so supports best_match. 
accept_header = context['accept_header'] if not accept_header: return MimeTypes.JSON SUPPORTED_TYPES = [MimeTypes.JSON, MimeTypes.JSON_HOME] return accept_header.best_match(SUPPORTED_TYPES) class Version(wsgi.Application): def __init__(self, version_type, routers=None): self.endpoint_url_type = version_type self._routers = routers super(Version, self).__init__() def _get_identity_url(self, context, version): """Returns a URL to keystone's own endpoint.""" url = self.base_url(context, self.endpoint_url_type) return '%s/%s/' % (url, version) def _get_versions_list(self, context): """The list of versions is dependent on the context.""" versions = {} if 'v2.0' in _VERSIONS: versions['v2.0'] = { 'id': 'v2.0', 'status': 'stable', 'updated': '2014-04-17T00:00:00Z', 'links': [ { 'rel': 'self', 'href': self._get_identity_url(context, 'v2.0'), }, { 'rel': 'describedby', 'type': 'text/html', 'href': 'http://docs.openstack.org/' } ], 'media-types': [ { 'base': 'application/json', 'type': MEDIA_TYPE_JSON % 'v2.0' } ] } if 'v3' in _VERSIONS: versions['v3'] = { 'id': 'v3.6', 'status': 'stable', 'updated': '2016-04-04T00:00:00Z', 'links': [ { 'rel': 'self', 'href': self._get_identity_url(context, 'v3'), } ], 'media-types': [ { 'base': 'application/json', 'type': MEDIA_TYPE_JSON % 'v3' } ] } return versions def get_versions(self, context): req_mime_type = v3_mime_type_best_match(context) if req_mime_type == MimeTypes.JSON_HOME: v3_json_home = request_v3_json_home('/v3') return wsgi.render_response( body=v3_json_home, headers=(('Content-Type', MimeTypes.JSON_HOME),)) versions = self._get_versions_list(context) return wsgi.render_response(status=(300, 'Multiple Choices'), body={ 'versions': { 'values': list(versions.values()) } }) def get_version_v2(self, context): versions = self._get_versions_list(context) if 'v2.0' in _VERSIONS: return wsgi.render_response(body={ 'version': versions['v2.0'] }) else: raise exception.VersionNotFound(version='v2.0') def _get_json_home_v3(self): def 
all_resources(): for router in self._routers: for resource in router.v3_resources: yield resource return { 'resources': dict(all_resources()) } def get_version_v3(self, context): versions = self._get_versions_list(context) if 'v3' in _VERSIONS: req_mime_type = v3_mime_type_best_match(context) if req_mime_type == MimeTypes.JSON_HOME: return wsgi.render_response( body=self._get_json_home_v3(), headers=(('Content-Type', MimeTypes.JSON_HOME),)) return wsgi.render_response(body={ 'version': versions['v3'] }) else: raise exception.VersionNotFound(version='v3') keystone-9.0.0/keystone/version/routers.py0000664000567000056710000000541112701407102022120 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The only types of routers in this file should be ``ComposingRouters``. The routers for the backends should be in the backend-specific router modules. 
For example, the ``ComposableRouter`` for ``identity`` belongs in:: keystone.identity.routers """ from keystone.common import wsgi from keystone.version import controllers class Extension(wsgi.ComposableRouter): def __init__(self, is_admin=True): if is_admin: self.controller = controllers.AdminExtensions() else: self.controller = controllers.PublicExtensions() def add_routes(self, mapper): extensions_controller = self.controller mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) class VersionV2(wsgi.ComposableRouter): def __init__(self, description): self.description = description def add_routes(self, mapper): version_controller = controllers.Version(self.description) mapper.connect('/', controller=version_controller, action='get_version_v2') class VersionV3(wsgi.ComposableRouter): def __init__(self, description, routers): self.description = description self._routers = routers def add_routes(self, mapper): version_controller = controllers.Version(self.description, routers=self._routers) mapper.connect('/', controller=version_controller, action='get_version_v3') class Versions(wsgi.ComposableRouter): def __init__(self, description): self.description = description def add_routes(self, mapper): version_controller = controllers.Version(self.description) mapper.connect('/', controller=version_controller, action='get_versions') keystone-9.0.0/keystone/revoke/0000775000567000056710000000000012701407246017661 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/revoke/backends/0000775000567000056710000000000012701407246021433 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/revoke/backends/__init__.py0000664000567000056710000000000012701407102023521 0ustar 
jenkinsjenkins00000000000000keystone-9.0.0/keystone/revoke/backends/sql.py0000664000567000056710000001014412701407102022573 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import sql from keystone.models import revoke_model from keystone import revoke class RevocationEvent(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'revocation_event' attributes = revoke_model.REVOKE_KEYS # The id field is not going to be exposed to the outside world. # It is, however, necessary for SQLAlchemy. 
id = sql.Column(sql.Integer, primary_key=True, nullable=False) domain_id = sql.Column(sql.String(64)) project_id = sql.Column(sql.String(64)) user_id = sql.Column(sql.String(64)) role_id = sql.Column(sql.String(64)) trust_id = sql.Column(sql.String(64)) consumer_id = sql.Column(sql.String(64)) access_token_id = sql.Column(sql.String(64)) issued_before = sql.Column(sql.DateTime(), nullable=False) expires_at = sql.Column(sql.DateTime()) revoked_at = sql.Column(sql.DateTime(), nullable=False, index=True) audit_id = sql.Column(sql.String(32)) audit_chain_id = sql.Column(sql.String(32)) class Revoke(revoke.RevokeDriverV8): def _flush_batch_size(self, dialect): batch_size = 0 if dialect == 'ibm_db_sa': # This functionality is limited to DB2, because # it is necessary to prevent the transaction log # from filling up, whereas at least some of the # other supported databases do not support update # queries with LIMIT subqueries nor do they appear # to require the use of such queries when deleting # large numbers of records at once. batch_size = 100 # Limit of 100 is known to not fill a transaction log # of default maximum size while not significantly # impacting the performance of large token purges on # systems where the maximum transaction log size has # been increased beyond the default. return batch_size def _prune_expired_events(self): oldest = revoke.revoked_before_cutoff_time() with sql.session_for_write() as session: dialect = session.bind.dialect.name batch_size = self._flush_batch_size(dialect) if batch_size > 0: query = session.query(RevocationEvent.id) query = query.filter(RevocationEvent.revoked_at < oldest) query = query.limit(batch_size).subquery() delete_query = (session.query(RevocationEvent). 
filter(RevocationEvent.id.in_(query))) while True: rowcount = delete_query.delete(synchronize_session=False) if rowcount == 0: break else: query = session.query(RevocationEvent) query = query.filter(RevocationEvent.revoked_at < oldest) query.delete(synchronize_session=False) session.flush() def list_events(self, last_fetch=None): with sql.session_for_read() as session: query = session.query(RevocationEvent).order_by( RevocationEvent.revoked_at) if last_fetch: query = query.filter(RevocationEvent.revoked_at > last_fetch) events = [revoke_model.RevokeEvent(**e.to_dict()) for e in query] return events def revoke(self, event): kwargs = dict() for attr in revoke_model.REVOKE_KEYS: kwargs[attr] = getattr(event, attr) record = RevocationEvent(**kwargs) with sql.session_for_write() as session: session.add(record) self._prune_expired_events() keystone-9.0.0/keystone/revoke/model.py0000664000567000056710000000112512701407102021321 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.models.revoke_model import * # noqa keystone-9.0.0/keystone/revoke/__init__.py0000664000567000056710000000111512701407102021757 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.revoke.core import * # noqa keystone-9.0.0/keystone/revoke/core.py0000664000567000056710000002325712701407102021163 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Main entry point into the Revoke service.""" import abc import datetime from oslo_config import cfg from oslo_log import versionutils from oslo_utils import timeutils import six from keystone.common import cache from keystone.common import dependency from keystone.common import extension from keystone.common import manager from keystone import exception from keystone.i18n import _ from keystone.models import revoke_model from keystone import notifications CONF = cfg.CONF EXTENSION_DATA = { 'name': 'OpenStack Revoke API', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-REVOKE/v1.0', 'alias': 'OS-REVOKE', 'updated': '2014-02-24T20:51:0-00:00', 'description': 'OpenStack revoked token reporting mechanism.', 'links': [ { 'rel': 'describedby', 'type': 'text/html', 'href': 'http://specs.openstack.org/openstack/keystone-specs/api/' 'v3/identity-api-v3-os-revoke-ext.html', } ]} extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA) extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA) MEMOIZE = cache.get_memoization_decorator(group='revoke') def revoked_before_cutoff_time(): expire_delta = datetime.timedelta( seconds=CONF.token.expiration + CONF.revoke.expiration_buffer) oldest = timeutils.utcnow() - expire_delta return oldest @dependency.provider('revoke_api') class Manager(manager.Manager): """Default pivot point for the Revoke backend. Performs common logic for recording revocations. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. 
""" driver_namespace = 'keystone.revoke' def __init__(self): super(Manager, self).__init__(CONF.revoke.driver) self._register_listeners() self.model = revoke_model def _user_callback(self, service, resource_type, operation, payload): self.revoke_by_user(payload['resource_info']) def _role_callback(self, service, resource_type, operation, payload): self.revoke( revoke_model.RevokeEvent(role_id=payload['resource_info'])) def _project_callback(self, service, resource_type, operation, payload): self.revoke( revoke_model.RevokeEvent(project_id=payload['resource_info'])) def _domain_callback(self, service, resource_type, operation, payload): self.revoke( revoke_model.RevokeEvent(domain_id=payload['resource_info'])) def _trust_callback(self, service, resource_type, operation, payload): self.revoke( revoke_model.RevokeEvent(trust_id=payload['resource_info'])) def _consumer_callback(self, service, resource_type, operation, payload): self.revoke( revoke_model.RevokeEvent(consumer_id=payload['resource_info'])) def _access_token_callback(self, service, resource_type, operation, payload): self.revoke( revoke_model.RevokeEvent(access_token_id=payload['resource_info'])) def _role_assignment_callback(self, service, resource_type, operation, payload): info = payload['resource_info'] self.revoke_by_grant(role_id=info['role_id'], user_id=info['user_id'], domain_id=info.get('domain_id'), project_id=info.get('project_id')) def _register_listeners(self): callbacks = { notifications.ACTIONS.deleted: [ ['OS-TRUST:trust', self._trust_callback], ['OS-OAUTH1:consumer', self._consumer_callback], ['OS-OAUTH1:access_token', self._access_token_callback], ['role', self._role_callback], ['user', self._user_callback], ['project', self._project_callback], ['role_assignment', self._role_assignment_callback] ], notifications.ACTIONS.disabled: [ ['user', self._user_callback], ['project', self._project_callback], ['domain', self._domain_callback], ], notifications.ACTIONS.internal: [ 
[notifications.INVALIDATE_USER_TOKEN_PERSISTENCE, self._user_callback], ] } for event, cb_info in callbacks.items(): for resource_type, callback_fns in cb_info: notifications.register_event_callback(event, resource_type, callback_fns) def revoke_by_user(self, user_id): return self.revoke(revoke_model.RevokeEvent(user_id=user_id)) def _assert_not_domain_and_project_scoped(self, domain_id=None, project_id=None): if domain_id is not None and project_id is not None: msg = _('The revoke call must not have both domain_id and ' 'project_id. This is a bug in the Keystone server. The ' 'current request is aborted.') raise exception.UnexpectedError(exception=msg) @versionutils.deprecated(as_of=versionutils.deprecated.JUNO, remove_in=0) def revoke_by_expiration(self, user_id, expires_at, domain_id=None, project_id=None): self._assert_not_domain_and_project_scoped(domain_id=domain_id, project_id=project_id) self.revoke( revoke_model.RevokeEvent(user_id=user_id, expires_at=expires_at, domain_id=domain_id, project_id=project_id)) def revoke_by_audit_id(self, audit_id): self.revoke(revoke_model.RevokeEvent(audit_id=audit_id)) def revoke_by_audit_chain_id(self, audit_chain_id, project_id=None, domain_id=None): self._assert_not_domain_and_project_scoped(domain_id=domain_id, project_id=project_id) self.revoke(revoke_model.RevokeEvent(audit_chain_id=audit_chain_id, domain_id=domain_id, project_id=project_id)) def revoke_by_grant(self, role_id, user_id=None, domain_id=None, project_id=None): self.revoke( revoke_model.RevokeEvent(user_id=user_id, role_id=role_id, domain_id=domain_id, project_id=project_id)) def revoke_by_user_and_project(self, user_id, project_id): self.revoke( revoke_model.RevokeEvent(project_id=project_id, user_id=user_id)) def revoke_by_project_role_assignment(self, project_id, role_id): self.revoke(revoke_model.RevokeEvent(project_id=project_id, role_id=role_id)) def revoke_by_domain_role_assignment(self, domain_id, role_id): 
self.revoke(revoke_model.RevokeEvent(domain_id=domain_id, role_id=role_id)) @MEMOIZE def _get_revoke_tree(self): events = self.driver.list_events() revoke_tree = revoke_model.RevokeTree(revoke_events=events) return revoke_tree def check_token(self, token_values): """Checks the values from a token against the revocation list :param token_values: dictionary of values from a token, normalized for differences between v2 and v3. The checked values are a subset of the attributes of model.TokenEvent :raises keystone.exception.TokenNotFound: If the token is invalid. """ if self._get_revoke_tree().is_revoked(token_values): raise exception.TokenNotFound(_('Failed to validate token')) def revoke(self, event): self.driver.revoke(event) self._get_revoke_tree.invalidate(self) @six.add_metaclass(abc.ABCMeta) class RevokeDriverV8(object): """Interface for recording and reporting revocation events.""" @abc.abstractmethod def list_events(self, last_fetch=None): """return the revocation events, as a list of objects :param last_fetch: Time of last fetch. Return all events newer. :returns: A list of keystone.revoke.model.RevokeEvent newer than `last_fetch.` If no last_fetch is specified, returns all events for tokens issued after the expiration cutoff. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def revoke(self, event): """register a revocation event :param event: An instance of keystone.revoke.model.RevocationEvent """ raise exception.NotImplemented() # pragma: no cover Driver = manager.create_legacy_driver(RevokeDriverV8) keystone-9.0.0/keystone/revoke/controllers.py0000664000567000056710000000334012701407102022570 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils from keystone.common import controller from keystone.common import dependency from keystone import exception from keystone.i18n import _ @dependency.requires('revoke_api') class RevokeController(controller.V3Controller): @controller.protected() def list_revoke_events(self, context): since = context['query_string'].get('since') last_fetch = None if since: try: last_fetch = timeutils.normalize_time( timeutils.parse_isotime(since)) except ValueError: raise exception.ValidationError( message=_('invalid date format %s') % since) events = self.revoke_api.list_events(last_fetch=last_fetch) # Build the links by hand as the standard controller calls require ids response = {'events': [event.to_dict() for event in events], 'links': { 'next': None, 'self': RevokeController.base_url( context, path=context['path']), 'previous': None} } return response keystone-9.0.0/keystone/revoke/routers.py0000664000567000056710000000212012701407102021720 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import json_home from keystone.common import wsgi from keystone.revoke import controllers class Routers(wsgi.RoutersBase): PATH_PREFIX = '/OS-REVOKE' def append_v3_routers(self, mapper, routers): revoke_controller = controllers.RevokeController() self._add_resource( mapper, revoke_controller, path=self.PATH_PREFIX + '/events', get_action='list_revoke_events', rel=json_home.build_v3_extension_resource_relation( 'OS-REVOKE', '1.0', 'events')) keystone-9.0.0/keystone/common/0000775000567000056710000000000012701407246017656 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/environment/0000775000567000056710000000000012701407246022222 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/environment/__init__.py0000664000567000056710000000642112701407102024325 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import os from oslo_log import log LOG = log.getLogger(__name__) __all__ = ('Server', 'httplib', 'subprocess') _configured = False Server = None httplib = None subprocess = None def configure_once(name): """Ensure that environment configuration is only run once. If environment is reconfigured in the same way then it is ignored. It is an error to attempt to reconfigure environment in a different way. 
""" def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): global _configured if _configured: if _configured == name: return else: raise SystemError("Environment has already been " "configured as %s" % _configured) LOG.debug("Environment configured as: %s", name) _configured = name return func(*args, **kwargs) return wrapper return decorator @configure_once('eventlet') def use_eventlet(monkeypatch_thread=None): global httplib, subprocess, Server # This must be set before the initial import of eventlet because if # dnspython is present in your environment then eventlet monkeypatches # socket.getaddrinfo() with an implementation which doesn't work for IPv6. os.environ['EVENTLET_NO_GREENDNS'] = 'yes' import eventlet from eventlet.green import httplib as _httplib from eventlet.green import subprocess as _subprocess from keystone.common.environment import eventlet_server if monkeypatch_thread is None: monkeypatch_thread = not os.getenv('STANDARD_THREADS') # Raise the default from 8192 to accommodate large tokens eventlet.wsgi.MAX_HEADER_LINE = 16384 # NOTE(ldbragst): Explicitly declare what should be monkey patched and # what shouldn't. Doing this allows for more readable code when # understanding Eventlet in Keystone. The following is a complete list # of what is monkey patched instead of passing all=False and then passing # module=True to monkey patch a specific module. eventlet.patcher.monkey_patch(os=False, select=True, socket=True, thread=monkeypatch_thread, time=True, psycopg=False, MySQLdb=False) Server = eventlet_server.Server httplib = _httplib subprocess = _subprocess @configure_once('stdlib') def use_stdlib(): global httplib, subprocess import six.moves.http_client as _httplib import subprocess as _subprocess # nosec : This is used in .federation.idp # and .common.openssl. See there. 
httplib = _httplib subprocess = _subprocess keystone-9.0.0/keystone/common/environment/eventlet_server.py0000664000567000056710000001744412701407102026011 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import re import socket import ssl import sys import eventlet import eventlet.wsgi import greenlet from oslo_config import cfg from oslo_log import log from oslo_service import service from keystone.i18n import _LE, _LI CONF = cfg.CONF LOG = log.getLogger(__name__) # The size of a pool that is used to spawn a single green thread in which # a wsgi server is then started. The size of one is enough, because in case # of several workers the parent process forks and each child gets a copy # of a pool, which does not include any greenthread object as the spawn is # done after the fork. POOL_SIZE = 1 class EventletFilteringLogger(object): # NOTE(morganfainberg): This logger is designed to filter out specific # Tracebacks to limit the amount of data that eventlet can log. In the # case of broken sockets (EPIPE and ECONNRESET), we are seeing a huge # volume of data being written to the logs due to ~14 lines+ per traceback. # The traceback in these cases are, at best, useful for limited debugging # cases. 
def __init__(self, logger, level=log.INFO): self.logger = logger self.level = level self.regex = re.compile(r'errno (%d|%d)' % (errno.EPIPE, errno.ECONNRESET), re.IGNORECASE) def write(self, msg): m = self.regex.search(msg) if m: self.logger.log(log.logging.DEBUG, 'Error(%s) writing to socket.', m.group(1)) else: self.logger.log(self.level, msg.rstrip()) class Server(service.ServiceBase): """Server class to manage multiple WSGI sockets and applications.""" def __init__(self, application, host=None, port=None, keepalive=False, keepidle=None): self.application = application self.host = host or '0.0.0.0' # nosec : Bind to all interfaces by # default for backwards compatibility. self.port = port or 0 # Pool for a green thread in which wsgi server will be running self.pool = eventlet.GreenPool(POOL_SIZE) self.socket_info = {} self.greenthread = None self.do_ssl = False self.cert_required = False self.keepalive = keepalive self.keepidle = keepidle self.socket = None def listen(self, key=None, backlog=128): """Create and start listening on socket. Call before forking worker processes. Raises Exception if this has already been called. """ # TODO(dims): eventlet's green dns/socket module does not actually # support IPv6 in getaddrinfo(). We need to get around this in the # future or monitor upstream for a fix. 
# Please refer below link # (https://bitbucket.org/eventlet/eventlet/ # src/e0f578180d7d82d2ed3d8a96d520103503c524ec/eventlet/support/ # greendns.py?at=0.12#cl-163) info = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0] try: self.socket = eventlet.listen(info[-1], family=info[0], backlog=backlog) except EnvironmentError: LOG.error(_LE("Could not bind to %(host)s:%(port)s"), {'host': self.host, 'port': self.port}) raise LOG.info(_LI('Starting %(arg0)s on %(host)s:%(port)s'), {'arg0': sys.argv[0], 'host': self.host, 'port': self.port}) def start(self, key=None, backlog=128): """Run a WSGI server with the given application.""" if self.socket is None: self.listen(key=key, backlog=backlog) dup_socket = self.socket.dup() if key: self.socket_info[key] = self.socket.getsockname() # SSL is enabled if self.do_ssl: if self.cert_required: cert_reqs = ssl.CERT_REQUIRED else: cert_reqs = ssl.CERT_NONE dup_socket = eventlet.wrap_ssl(dup_socket, certfile=self.certfile, keyfile=self.keyfile, server_side=True, cert_reqs=cert_reqs, ca_certs=self.ca_certs) # Optionally enable keepalive on the wsgi socket. if self.keepalive: dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) if self.keepidle is not None: if hasattr(socket, 'TCP_KEEPIDLE'): dup_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, self.keepidle) else: LOG.warning("System does not support TCP_KEEPIDLE but " "tcp_keepidle has been set. Ignoring.") self.greenthread = self.pool.spawn(self._run, self.application, dup_socket) def set_ssl(self, certfile, keyfile=None, ca_certs=None, cert_required=True): self.certfile = certfile self.keyfile = keyfile self.ca_certs = ca_certs self.cert_required = cert_required self.do_ssl = True def stop(self): if self.greenthread is not None: self.greenthread.kill() def wait(self): """Wait until all servers have completed running.""" try: self.pool.waitall() except KeyboardInterrupt: # nosec # If CTRL-C, just break out of the loop. 
pass except greenlet.GreenletExit: # nosec # If exiting, break out of the loop. pass def reset(self): """Required by the service interface. The service interface is used by the launcher when receiving a SIGHUP. The service interface is defined in oslo_service.service.Service. Keystone does not need to do anything here. """ pass def _run(self, application, socket): """Start a WSGI server with a new green thread pool.""" logger = log.getLogger('eventlet.wsgi.server') # NOTE(dolph): [eventlet_server] client_socket_timeout is required to # be an integer in keystone.conf, but in order to make # eventlet.wsgi.server() wait forever, we pass None instead of 0. socket_timeout = CONF.eventlet_server.client_socket_timeout or None try: eventlet.wsgi.server( socket, application, log=EventletFilteringLogger(logger), debug=False, keepalive=CONF.eventlet_server.wsgi_keep_alive, socket_timeout=socket_timeout) except greenlet.GreenletExit: # nosec # Wait until all servers have completed running pass except Exception: LOG.exception(_LE('Server error')) raise keystone-9.0.0/keystone/common/validation/0000775000567000056710000000000012701407246022010 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/validation/__init__.py0000664000567000056710000000744312701407102024120 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Request body validating middleware for OpenStack Identity resources.""" import functools import inspect from keystone.common.validation import validators from keystone import exception from keystone.i18n import _ def validated(request_body_schema, resource_to_validate): """Register a schema to validate a resource reference. Registered schema will be used for validating a request body just before API method execution. :param request_body_schema: a schema to validate the resource reference :param resource_to_validate: the reference to validate :raises keystone.exception.ValidationError: if `resource_to_validate` is None. (see wrapper method below). :raises TypeError: at decoration time when the expected resource to validate isn't found in the decorated method's signature """ schema_validator = validators.SchemaValidator(request_body_schema) def add_validator(func): argspec = inspect.getargspec(func) try: arg_index = argspec.args.index(resource_to_validate) except ValueError: raise TypeError(_('validated expected to find %(param_name)r in ' 'function signature for %(func_name)r.') % {'param_name': resource_to_validate, 'func_name': func.__name__}) @functools.wraps(func) def wrapper(*args, **kwargs): if (resource_to_validate in kwargs and kwargs[resource_to_validate] is not None): schema_validator.validate(kwargs[resource_to_validate]) else: try: resource = args[arg_index] # If the resource to be validated is not None but # empty, it is possible to be validated by jsonschema. if resource is not None: schema_validator.validate(resource) else: raise exception.ValidationError( attribute=resource_to_validate, target='request body') # We cannot find the resource neither from kwargs nor args. except IndexError: raise exception.ValidationError( attribute=resource_to_validate, target='request body') return func(*args, **kwargs) return wrapper return add_validator def nullable(property_schema): """Clone a property schema into one that is nullable. 
:param dict property_schema: schema to clone into a nullable schema :returns: a new dict schema """ # TODO(dstanek): deal with the case where type is already a list; we don't # do that yet so I'm not wasting time on it new_schema = property_schema.copy() new_schema['type'] = [property_schema['type'], 'null'] return new_schema def add_array_type(property_schema): """Convert the parameter schema to be of type list. :param dict property_schema: schema to add array type to :returns: a new dict schema """ new_schema = property_schema.copy() new_schema['type'] = [property_schema['type'], 'array'] return new_schema keystone-9.0.0/keystone/common/validation/validators.py0000664000567000056710000000522112701407105024524 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Internal implementation of request body validating middleware.""" import jsonschema from keystone import exception from keystone.i18n import _ class SchemaValidator(object): """Resource reference validator class.""" validator_org = jsonschema.Draft4Validator def __init__(self, schema): # NOTE(lbragstad): If at some point in the future we want to extend # our validators to include something specific we need to check for, # we can do it here. Nova's V3 API validators extend the validator to # include `self._validate_minimum` and `self._validate_maximum`. This # would be handy if we needed to check for something the jsonschema # didn't by default. 
See the Nova V3 validator for details on how this # is done. validators = {} validator_cls = jsonschema.validators.extend(self.validator_org, validators) fc = jsonschema.FormatChecker() self.validator = validator_cls(schema, format_checker=fc) def validate(self, *args, **kwargs): try: self.validator.validate(*args, **kwargs) except jsonschema.ValidationError as ex: # NOTE: For whole OpenStack message consistency, this error # message has been written in a format consistent with WSME. if ex.path: # NOTE(lbragstad): Here we could think about using iter_errors # as a method of providing invalid parameters back to the # user. # TODO(lbragstad): If the value of a field is confidential or # too long, then we should build the masking in here so that # we don't expose sensitive user information in the event it # fails validation. detail = _("Invalid input for field '%(path)s'. The value is " "'%(value)s'.") % {'path': ex.path.pop(), 'value': ex.instance} else: detail = ex.message raise exception.SchemaValidationError(detail=detail) keystone-9.0.0/keystone/common/validation/parameter_types.py0000664000567000056710000000371712701407105025570 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Common parameter types for validating a request reference.""" boolean = { 'type': 'boolean', 'enum': [True, False] } # NOTE(lbragstad): Be mindful of this pattern as it might require changes # once this is used on user names, LDAP-based user names specifically since # commas aren't allowed in the following pattern. Here we are only going to # check the length of the name and ensure that it's a string. Right now we are # not going to validate on a naming pattern for issues with # internationalization. name = { 'type': 'string', 'minLength': 1, 'maxLength': 255 } external_id_string = { 'type': 'string', 'minLength': 1, 'maxLength': 64 } id_string = { 'type': 'string', 'minLength': 1, 'maxLength': 64, # TODO(lbragstad): Find a way to make this configurable such that the end # user chooses how much control they want over id_strings with a regex 'pattern': '^[a-zA-Z0-9-]+$' } mapping_id_string = { 'type': 'string', 'minLength': 1, 'maxLength': 64, 'pattern': '^[a-zA-Z0-9-_]+$' } description = { 'type': 'string' } url = { 'type': 'string', 'minLength': 0, 'maxLength': 225, # NOTE(edmondsw): we could do more to validate per various RFCs, but # decision was made to err on the side of leniency. The following is based # on rfc1738 section 2.1 'pattern': '^[a-zA-Z0-9+.-]+:.+' } email = { 'type': 'string', 'format': 'email' } keystone-9.0.0/keystone/common/dependency.py0000664000567000056710000001675512701407102022353 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """This module provides support for dependency injection. Providers are registered via the ``@provider()`` decorator, and dependencies on them are registered with ``@requires()``. Providers are available to their consumers via an attribute. See the documentation for the individual functions for more detail. See also: https://en.wikipedia.org/wiki/Dependency_injection """ import traceback from keystone.i18n import _ _REGISTRY = {} _future_dependencies = {} _factories = {} def _set_provider(name, provider): _original_provider, where_registered = _REGISTRY.get(name, (None, None)) if where_registered: raise Exception('%s already has a registered provider, at\n%s' % (name, ''.join(where_registered))) _REGISTRY[name] = (provider, traceback.format_stack()) GET_REQUIRED = object() GET_OPTIONAL = object() def get_provider(name, optional=GET_REQUIRED): if optional is GET_REQUIRED: return _REGISTRY[name][0] return _REGISTRY.get(name, (None, None))[0] class UnresolvableDependencyException(Exception): """Raised when a required dependency is not resolvable. See ``resolve_future_dependencies()`` for more details. """ def __init__(self, name, targets): msg = _('Unregistered dependency: %(name)s for %(targets)s') % { 'name': name, 'targets': targets} super(UnresolvableDependencyException, self).__init__(msg) def provider(name): """A class decorator used to register providers. When ``@provider()`` is used to decorate a class, members of that class will register themselves as providers for the named dependency. As an example, In the code fragment:: @dependency.provider('foo_api') class Foo: def __init__(self): ... ... foo = Foo() The object ``foo`` will be registered as a provider for ``foo_api``. No more than one such instance should be created; additional instances will replace the previous ones, possibly resulting in different instances being used by different consumers. 
""" def wrapper(cls): def wrapped(init): def __wrapped_init__(self, *args, **kwargs): """Initialize the wrapped object and add it to the registry.""" init(self, *args, **kwargs) _set_provider(name, self) resolve_future_dependencies(__provider_name=name) return __wrapped_init__ cls.__init__ = wrapped(cls.__init__) _factories[name] = cls return cls return wrapper def _process_dependencies(obj): # Any dependencies that can be resolved immediately are resolved. # Dependencies that cannot be resolved immediately are stored for # resolution in resolve_future_dependencies. def process(obj, attr_name, unresolved_in_out): for dependency in getattr(obj, attr_name, []): if dependency not in _REGISTRY: # We don't know about this dependency, so save it for later. unresolved_in_out.setdefault(dependency, []).append(obj) continue setattr(obj, dependency, get_provider(dependency)) process(obj, '_dependencies', _future_dependencies) def requires(*dependencies): """A class decorator used to inject providers into consumers. The required providers will be made available to instances of the decorated class via an attribute with the same name as the provider. For example, in the code fragment:: @dependency.requires('foo_api', 'bar_api') class FooBarClient: def __init__(self): ... ... client = FooBarClient() The object ``client`` will have attributes named ``foo_api`` and ``bar_api``, which are instances of the named providers. Objects must not rely on the existence of these attributes until after ``resolve_future_dependencies()`` has been called; they may not exist beforehand. Dependencies registered via ``@required()`` must have providers; if not, an ``UnresolvableDependencyException`` will be raised when ``resolve_future_dependencies()`` is called. 
""" def wrapper(self, *args, **kwargs): """Inject each dependency from the registry.""" self.__wrapped_init__(*args, **kwargs) _process_dependencies(self) def wrapped(cls): """Note the required dependencies on the object for later injection. The dependencies of the parent class are combined with that of the child class to create a new set of dependencies. """ existing_dependencies = getattr(cls, '_dependencies', set()) cls._dependencies = existing_dependencies.union(dependencies) if not hasattr(cls, '__wrapped_init__'): cls.__wrapped_init__ = cls.__init__ cls.__init__ = wrapper return cls return wrapped def resolve_future_dependencies(__provider_name=None): """Forces injection of all dependencies. Before this function is called, circular dependencies may not have been injected. This function should be called only once, after all global providers are registered. If an object needs to be created after this call, it must not have circular dependencies. If any required dependencies are unresolvable, this function will raise an ``UnresolvableDependencyException``. Outside of this module, this function should be called with no arguments; the optional argument, ``__provider_name`` is used internally, and should be treated as an implementation detail. """ new_providers = dict() if __provider_name: # A provider was registered, so take care of any objects depending on # it. targets = _future_dependencies.pop(__provider_name, []) for target in targets: setattr(target, __provider_name, get_provider(__provider_name)) return # Resolve future dependencies, raises UnresolvableDependencyException if # there's no provider registered. try: for dependency, targets in _future_dependencies.copy().items(): if dependency not in _REGISTRY: # a Class was registered that could fulfill the dependency, but # it has not yet been initialized. 
factory = _factories.get(dependency) if factory: provider = factory() new_providers[dependency] = provider else: raise UnresolvableDependencyException(dependency, targets) for target in targets: setattr(target, dependency, get_provider(dependency)) finally: _future_dependencies.clear() return new_providers def reset(): """Reset the registry of providers. This is useful for unit testing to ensure that tests don't use providers from previous tests. """ _REGISTRY.clear() _future_dependencies.clear() keystone-9.0.0/keystone/common/driver_hints.py0000664000567000056710000001106312701407102022720 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from keystone import exception from keystone.i18n import _ def truncated(f): """Ensure list truncation is detected in Driver list entity methods. This is designed to wrap Driver list_{entity} methods in order to calculate if the resultant list has been truncated. Provided a limit dict is found in the hints list, we increment the limit by one so as to ask the wrapped function for one more entity than the limit, and then once the list has been generated, we check to see if the original limit has been exceeded, in which case we truncate back to that limit and set the 'truncated' boolean to 'true' in the hints limit dict. 
""" @functools.wraps(f) def wrapper(self, hints, *args, **kwargs): if not hasattr(hints, 'limit'): raise exception.UnexpectedError( _('Cannot truncate a driver call without hints list as ' 'first parameter after self ')) if hints.limit is None: return f(self, hints, *args, **kwargs) # A limit is set, so ask for one more entry than we need list_limit = hints.limit['limit'] hints.set_limit(list_limit + 1) ref_list = f(self, hints, *args, **kwargs) # If we got more than the original limit then trim back the list and # mark it truncated. In both cases, make sure we set the limit back # to its original value. if len(ref_list) > list_limit: hints.set_limit(list_limit, truncated=True) return ref_list[:list_limit] else: hints.set_limit(list_limit) return ref_list return wrapper class Hints(object): """Encapsulate driver hints for listing entities. Hints are modifiers that affect the return of entities from a list_ operation. They are typically passed to a driver to give direction as to what filtering, pagination or list limiting actions are being requested. It is optional for a driver to action some or all of the list hints, but any filters that it does satisfy must be marked as such by calling removing the filter from the list. A Hint object contains filters, which is a list of dicts that can be accessed publicly. Also it contains a dict called limit, which will indicate the amount of data we want to limit our listing to. If the filter is discovered to never match, then `cannot_match` can be set to indicate that there will not be any matches and the backend work can be short-circuited. 
Each filter term consists of: * ``name``: the name of the attribute being matched * ``value``: the value against which it is being matched * ``comparator``: the operation, which can be one of ``equals``, ``contains``, ``startswith`` or ``endswith`` * ``case_sensitive``: whether any comparison should take account of case * ``type``: will always be 'filter' """ def __init__(self): self.limit = None self.filters = list() self.cannot_match = False def add_filter(self, name, value, comparator='equals', case_sensitive=False): """Adds a filter to the filters list, which is publicly accessible.""" self.filters.append({'name': name, 'value': value, 'comparator': comparator, 'case_sensitive': case_sensitive, 'type': 'filter'}) def get_exact_filter_by_name(self, name): """Return a filter key and value if exact filter exists for name.""" for entry in self.filters: if (entry['type'] == 'filter' and entry['name'] == name and entry['comparator'] == 'equals'): return entry def set_limit(self, limit, truncated=False): """Set a limit to indicate the list should be truncated.""" self.limit = {'limit': limit, 'type': 'limit', 'truncated': truncated} keystone-9.0.0/keystone/common/extension.py0000664000567000056710000000316312701407102022236 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
ADMIN_EXTENSIONS = {} PUBLIC_EXTENSIONS = {} def register_admin_extension(url_prefix, extension_data): """Register extension with collection of admin extensions. Extensions register the information here that will show up in the /extensions page as a way to indicate that the extension is active. url_prefix: unique key for the extension that will appear in the urls generated by the extension. extension_data is a dictionary. The expected fields are: 'name': short, human readable name of the extension 'namespace': xml namespace 'alias': identifier for the extension 'updated': date the extension was last updated 'description': text description of the extension 'links': hyperlinks to documents describing the extension """ ADMIN_EXTENSIONS[url_prefix] = extension_data def register_public_extension(url_prefix, extension_data): """Same as register_admin_extension but for public extensions.""" PUBLIC_EXTENSIONS[url_prefix] = extension_data keystone-9.0.0/keystone/common/cache/0000775000567000056710000000000012701407246020721 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/cache/backends/0000775000567000056710000000000012701407246022473 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/cache/backends/mongo.py0000664000567000056710000000161112701407102024152 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_cache.backends import mongo
from oslo_log import versionutils


# Deprecated pass-through: the implementation moved into oslo.cache.  This
# empty subclass only keeps the historical keystone backend path importable;
# the decorator makes oslo.log emit a deprecation warning when it is used.
@versionutils.deprecated(
    versionutils.deprecated.MITAKA,
    what='keystone.cache.mongo backend',
    in_favor_of='oslo_cache.mongo backend',
    remove_in=+1)
class MongoCacheBackend(mongo.MongoCacheBackend):
    pass
keystone-9.0.0/keystone/common/cache/backends/memcache_pool.py0000664000567000056710000000171712701407102025635 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""This module is deprecated."""

from oslo_cache.backends import memcache_pool
from oslo_log import versionutils


# Same pattern as MongoCacheBackend above: a deprecation shim for the
# pooled-memcached backend that now lives in oslo.cache.
@versionutils.deprecated(
    versionutils.deprecated.MITAKA,
    what='keystone.cache.memcache_pool backend',
    in_favor_of='oslo_cache.memcache_pool backend',
    remove_in=+1)
class PooledMemcachedBackend(memcache_pool.PooledMemcachedBackend):
    pass
keystone-9.0.0/keystone/common/cache/backends/__init__.py0000664000567000056710000000000012701407102024561 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/cache/backends/noop.py0000664000567000056710000000314412701407102024011 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from dogpile.cache import api from oslo_log import versionutils NO_VALUE = api.NO_VALUE @versionutils.deprecated( versionutils.deprecated.MITAKA, what='keystone.common.cache.noop backend', in_favor_of="dogpile.cache's Null backend", remove_in=+1) class NoopCacheBackend(api.CacheBackend): """A no op backend as a default caching backend. The no op backend is provided as the default caching backend for keystone to ensure that ``dogpile.cache.memory`` is not used in any real-world circumstances unintentionally. ``dogpile.cache.memory`` does not have a mechanism to cleanup it's internal dict and therefore could cause run-away memory utilization. """ def __init__(self, *args): return def get(self, key): return NO_VALUE def get_multi(self, keys): return [NO_VALUE for x in keys] def set(self, key, value): return def set_multi(self, mapping): return def delete(self, key): return def delete_multi(self, keys): return keystone-9.0.0/keystone/common/cache/__init__.py0000664000567000056710000000116012701407102023017 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the
# License for the specific language governing permissions and limitations
# under the License.

from keystone.common.cache.core import *  # noqa
keystone-9.0.0/keystone/common/cache/core.py0000664000567000056710000001061212701407102022212 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Keystone Caching Layer Implementation."""

import dogpile.cache
from dogpile.cache import api
from oslo_cache import core as cache
from oslo_config import cfg

from keystone.common.cache import _context_cache


CONF = cfg.CONF
# Single process-wide dogpile region shared by default across keystone.
CACHE_REGION = cache.create_region()


def configure_cache(region=None):
    """Configure a dogpile region from CONF and wrap it for request caching.

    :param region: dogpile cache region to configure; defaults to the
                   module-level ``CACHE_REGION`` when ``None``.
    """
    if region is None:
        region = CACHE_REGION
    # NOTE(morganfainberg): running cache.configure_cache_region()
    # sets region.is_configured, this must be captured before
    # cache.configure_cache_region is called.
    configured = region.is_configured
    cache.configure_cache_region(CONF, region)
    # Only wrap the region if it was not configured. This should be pushed
    # to oslo_cache lib somehow.
    if not configured:
        region.wrap(_context_cache._ResponseCacheProxy)


def get_memoization_decorator(group, expiration_group=None, region=None):
    """Return oslo.cache's memoization decorator bound to a region.

    :param group: config group whose caching options control the decorator.
    :param expiration_group: optional config group supplying the expiration
                             time; falls back to ``group`` inside oslo.cache.
    :param region: dogpile region to memoize into; defaults to
                   ``CACHE_REGION`` when ``None``.
    """
    if region is None:
        region = CACHE_REGION
    return cache.get_memoization_decorator(CONF, region, group,
                                           expiration_group=expiration_group)


# NOTE(stevemar): When memcache_pool, mongo and noop backends are removed
# we no longer need to register the backends here.
# Register the deprecated keystone backend names with dogpile so old
# configuration values keep resolving to the shim classes.
dogpile.cache.register_backend(
    'keystone.common.cache.noop',
    'keystone.common.cache.backends.noop',
    'NoopCacheBackend')

dogpile.cache.register_backend(
    'keystone.cache.mongo',
    'keystone.common.cache.backends.mongo',
    'MongoCacheBackend')

dogpile.cache.register_backend(
    'keystone.cache.memcache_pool',
    'keystone.common.cache.backends.memcache_pool',
    'PooledMemcachedBackend')


# TODO(morganfainberg): Move this logic up into oslo.cache directly
# so we can handle region-wide invalidations or alternatively propose
# a fix to dogpile.cache to make region-wide invalidates possible to
# work across distributed processes.
class _RegionInvalidator(object):
    # Stores a region's soft/hard invalidation markers in the region's own
    # backend (instead of process memory) so that invalidation can be shared
    # between processes using the same cache store.

    def __init__(self, region, region_name):
        self.region = region
        self.region_name = region_name
        region_key = '_RegionExpiration.%(type)s.%(region_name)s'
        self.soft_region_key = region_key % {'type': 'soft',
                                             'region_name': self.region_name}
        self.hard_region_key = region_key % {'type': 'hard',
                                             'region_name': self.region_name}

    @property
    def hard_invalidated(self):
        # Read straight from the backend (bypassing the region) so the raw
        # CachedValue sentinel can be distinguished from a stored payload.
        invalidated = self.region.backend.get(self.hard_region_key)
        if invalidated is not api.NO_VALUE:
            return invalidated.payload
        return None

    @hard_invalidated.setter
    def hard_invalidated(self, value):
        self.region.set(self.hard_region_key, value)

    @hard_invalidated.deleter
    def hard_invalidated(self):
        self.region.delete(self.hard_region_key)

    @property
    def soft_invalidated(self):
        invalidated = self.region.backend.get(self.soft_region_key)
        if invalidated is not api.NO_VALUE:
            return invalidated.payload
        return None

    @soft_invalidated.setter
    def soft_invalidated(self, value):
        self.region.set(self.soft_region_key, value)

    @soft_invalidated.deleter
    def soft_invalidated(self):
        self.region.delete(self.soft_region_key)


def apply_invalidation_patch(region, region_name):
    """Patch the region interfaces to ensure we share the expiration time.

    This method is used to patch region.invalidate,
    region._hard_invalidated, and region._soft_invalidated.
    """
    # Patch the region object. This logic needs to be moved up into dogpile
    # itself. Patching the internal interfaces, unfortunately, is the only
    # way to handle this at the moment.
    invalidator = _RegionInvalidator(region=region, region_name=region_name)
    # NOTE(review): accessing ``invalidator.hard_invalidated`` here invokes
    # the property getter, so the region attribute is assigned the *current
    # value* read from the backend at patch time, not the property object
    # itself — confirm this snapshot behavior is what was intended.
    setattr(region, '_hard_invalidated', invalidator.hard_invalidated)
    setattr(region, '_soft_invalidated', invalidator.soft_invalidated)
keystone-9.0.0/keystone/common/cache/_context_cache.py0000664000567000056710000001064612701407102024237 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""A dogpile.cache proxy that caches objects in the request local cache."""

from dogpile.cache import api
from dogpile.cache import proxy
from oslo_context import context as oslo_context
from oslo_serialization import msgpackutils

from keystone.models import revoke_model


class _RevokeModelHandler(object):
    # Custom msgpack handler that (de)serializes RevokeTree objects by
    # round-tripping only their ``revoke_map`` dict.

    # NOTE(morganfainberg): There needs to be reserved "registry" entries set
    # in oslo_serialization for application-specific handlers. We picked 127
    # here since it's waaaaaay far out before oslo_serialization will use it.
    identity = 127
    handles = (revoke_model.RevokeTree,)

    def __init__(self, registry):
        self._registry = registry

    def serialize(self, obj):
        return msgpackutils.dumps(obj.revoke_map,
                                  registry=self._registry)

    def deserialize(self, data):
        revoke_map = msgpackutils.loads(data, registry=self._registry)
        revoke_tree = revoke_model.RevokeTree()
        revoke_tree.revoke_map = revoke_map
        return revoke_tree


# Register our new handler.
# Temporarily unfreeze oslo.serialization's default msgpack registry so the
# RevokeTree handler can be added, then freeze it again.
_registry = msgpackutils.default_registry
_registry.frozen = False
_registry.register(_RevokeModelHandler(registry=_registry))
_registry.frozen = True


class _ResponseCacheProxy(proxy.ProxyBackend):
    # dogpile proxy that mirrors cache traffic into the oslo.context
    # request-local store, so repeated lookups within one request never hit
    # the real backend a second time.

    __key_pfx = '_request_cache_%s'

    def _get_request_context(self):
        # Return the current context or a new/empty context.
        return oslo_context.get_current() or oslo_context.RequestContext()

    def _get_request_key(self, key):
        # Namespace the backend key so it cannot collide with other
        # attributes stored on the request context.
        return self.__key_pfx % key

    def _set_local_cache(self, key, value, ctx=None):
        # Set a serialized version of the returned value in local cache for
        # subsequent calls to the memoized method.
        if not ctx:
            ctx = self._get_request_context()
        serialize = {'payload': value.payload, 'metadata': value.metadata}
        setattr(ctx, self._get_request_key(key), msgpackutils.dumps(serialize))
        ctx.update_store()

    def _get_local_cache(self, key):
        # Return the version from our local request cache if it exists.
        ctx = self._get_request_context()
        try:
            value = getattr(ctx, self._get_request_key(key))
        except AttributeError:
            return api.NO_VALUE

        value = msgpackutils.loads(value)
        return api.CachedValue(payload=value['payload'],
                               metadata=value['metadata'])

    def _delete_local_cache(self, key):
        # On invalidate/delete remove the value from the local request cache
        ctx = self._get_request_context()
        try:
            delattr(ctx, self._get_request_key(key))
            ctx.update_store()
        except AttributeError:  # nosec
            # NOTE(morganfainberg): We will simply pass here, this value has
            # not been cached locally in the request.
            pass

    def get(self, key):
        # Request-local copy wins; fall through to the proxied backend on a
        # local miss.  NOTE(review): a backend hit is not written back into
        # the local cache here — presumably deliberate; confirm.
        value = self._get_local_cache(key)
        if value is api.NO_VALUE:
            value = self.proxied.get(key)
        return value

    def set(self, key, value):
        self._set_local_cache(key, value)
        self.proxied.set(key, value)

    def delete(self, key):
        self._delete_local_cache(key)
        self.proxied.delete(key)

    def get_multi(self, keys):
        # Serve whatever we can locally, fetch only the remainder from the
        # proxied backend, and return values in the caller's key order.
        values = {}
        for key in keys:
            v = self._get_local_cache(key)
            if v is not api.NO_VALUE:
                values[key] = v
        query_keys = set(keys).difference(set(values.keys()))
        values.update(dict(
            zip(query_keys, self.proxied.get_multi(query_keys))))
        return [values[k] for k in keys]

    def set_multi(self, mapping):
        ctx = self._get_request_context()
        for k, v in mapping.items():
            self._set_local_cache(k, v, ctx)
        self.proxied.set_multi(mapping)

    def delete_multi(self, keys):
        for k in keys:
            self._delete_local_cache(k)
        self.proxied.delete_multi(keys)
keystone-9.0.0/keystone/common/utils.py0000664000567000056710000004714012701407105021370 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 - 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import calendar
import collections
import grp
import hashlib
import os
import pwd
import uuid

from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import reflection
from oslo_utils import strutils
from oslo_utils import timeutils
import passlib.hash
import six
from six import moves

from keystone.common import authorization
from keystone import exception
from keystone.i18n import _, _LE, _LW


CONF = cfg.CONF

LOG = log.getLogger(__name__)


# NOTE(stevermar): This UUID must stay the same, forever, across
# all of keystone to preserve its value as a URN namespace, which is
# used for ID transformation.
RESOURCE_ID_NAMESPACE = uuid.UUID('4332ecab-770b-4288-a680-b9aca3b1b153')


def resource_uuid(value):
    """Converts input to valid UUID hex digits.

    A value that already parses as a UUID is returned unchanged; anything
    else up to 64 characters is deterministically mapped into the
    RESOURCE_ID_NAMESPACE via uuid5.
    """
    try:
        uuid.UUID(value)
        return value
    except ValueError:
        if len(value) <= 64:
            if six.PY2 and isinstance(value, six.text_type):
                value = value.encode('utf-8')
            return uuid.uuid5(RESOURCE_ID_NAMESPACE, value).hex
        raise ValueError(_('Length of transformable resource id > 64, '
                           'which is max allowed characters'))


def flatten_dict(d, parent_key=''):
    """Flatten a nested dictionary

    Converts a dictionary with nested values to a single level flat
    dictionary, with dotted notation for each key.
    """
    items = []
    for k, v in d.items():
        new_key = parent_key + '.' + k if parent_key else k
        if isinstance(v, collections.MutableMapping):
            # Recurse into nested mappings; lists are left as-is.
            items.extend(list(flatten_dict(v, new_key).items()))
        else:
            items.append((new_key, v))
    return dict(items)


def read_cached_file(filename, cache_info, reload_func=None):
    """Read from a file if it has been modified.

    :param cache_info: dictionary to hold opaque cache.
    :param reload_func: optional function to be called with data when
                        file is reloaded due to a modification.

    :returns: data from file.

    """
    mtime = os.path.getmtime(filename)
    if not cache_info or mtime != cache_info.get('mtime'):
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        if reload_func:
            reload_func(cache_info['data'])
    return cache_info['data']


class SmarterEncoder(jsonutils.json.JSONEncoder):
    """Help for JSON encoding dict-like objects."""

    def default(self, obj):
        # Objects exposing iteritems() (dict-like but not a dict) are
        # converted to a plain dict before encoding.
        if not isinstance(obj, dict) and hasattr(obj, 'iteritems'):
            return dict(obj.iteritems())
        return super(SmarterEncoder, self).default(obj)


class PKIEncoder(SmarterEncoder):
    """Special encoder to make token JSON a bit shorter."""

    # Drop the spaces after ',' and ':' to shrink PKI token payloads.
    item_separator = ','
    key_separator = ':'


def verify_length_and_trunc_password(password):
    """Verify and truncate the provided password to the max_password_length.

    :raises keystone.exception.PasswordVerificationError: if the password is
        over-length and strict_password_check is enabled.
    :raises keystone.exception.ValidationError: if password has no length
        (e.g. not a string).
    """
    max_length = CONF.identity.max_password_length
    try:
        if len(password) > max_length:
            if CONF.strict_password_check:
                raise exception.PasswordVerificationError(size=max_length)
            else:
                LOG.warning(
                    _LW('Truncating user password to '
                        '%d characters.'), max_length)
                return password[:max_length]
        else:
            return password
    except TypeError:
        raise exception.ValidationError(attribute='string', target='password')


def hash_access_key(access):
    """Return the SHA-256 hex digest of an EC2-style access key."""
    hash_ = hashlib.sha256()
    if not isinstance(access, six.binary_type):
        access = access.encode('utf-8')
    hash_.update(access)
    return hash_.hexdigest()


def hash_user_password(user):
    """Hash a user dict's password without modifying the passed-in dict."""
    password = user.get('password')
    if password is None:
        return user

    return dict(user, password=hash_password(password))


def hash_password(password):
    """Hash a password. Hard."""
    password_utf8 = verify_length_and_trunc_password(password).encode('utf-8')
    return passlib.hash.sha512_crypt.encrypt(
        password_utf8, rounds=CONF.crypt_strength)


def check_password(password, hashed):
    """Check that a plaintext password matches hashed.

    hashpw returns the salt value concatenated with the actual hash value.
    It extracts the actual salt if this value is then passed as the salt.
    """
    if password is None or hashed is None:
        return False
    password_utf8 = verify_length_and_trunc_password(password).encode('utf-8')
    return passlib.hash.sha512_crypt.verify(password_utf8, hashed)


def attr_as_boolean(val_attr):
    """Returns the boolean value, decoded from a string.

    We test explicitly for a value meaning False, which can be one of
    several formats as specified in oslo strutils.FALSE_STRINGS.
    All other string values (including an empty string) are treated as
    meaning True.
    """
    return strutils.bool_from_string(val_attr, default=True)


def get_blob_from_credential(credential):
    """Decode and validate a credential's JSON blob.

    :raises keystone.exception.ValidationError: if the blob is not valid
        JSON or does not decode to a non-empty dict.
    """
    try:
        blob = jsonutils.loads(credential.blob)
    except (ValueError, TypeError):
        raise exception.ValidationError(
            message=_('Invalid blob in credential'))
    if not blob or not isinstance(blob, dict):
        raise exception.ValidationError(attribute='blob',
                                        target='credential')
    return blob


def convert_ec2_to_v3_credential(ec2credential):
    """Build a v3 credential dict from an EC2 credential object."""
    blob = {'access': ec2credential.access,
            'secret': ec2credential.secret}
    return {'id': hash_access_key(ec2credential.access),
            'user_id': ec2credential.user_id,
            'project_id': ec2credential.tenant_id,
            'blob': jsonutils.dumps(blob),
            'type': 'ec2',
            'extra': jsonutils.dumps({})}


def convert_v3_to_ec2_credential(credential):
    """Extract EC2 access/secret fields from a v3 credential's blob."""
    blob = get_blob_from_credential(credential)
    return {'access': blob.get('access'),
            'secret': blob.get('secret'),
            'user_id': credential.user_id,
            'tenant_id': credential.project_id,
            }


def unixtime(dt_obj):
    """Format datetime object as unix timestamp

    :param dt_obj: datetime.datetime object
    :returns: float

    """
    return calendar.timegm(dt_obj.utctimetuple())


def auth_str_equal(provided, known):
    """Constant-time string comparison.

    :params provided: the first string
    :params known: the second string

    :returns: True if the strings are equal.

    This function takes two strings and compares them.  It is intended to be
    used when doing a comparison for authentication purposes to help guard
    against timing attacks.  When using the function for this purpose, always
    provide the user-provided password as the first argument.  The time this
    function will take is always a factor of the length of this string.
    """
    result = 0
    p_len = len(provided)
    k_len = len(known)
    for i in moves.range(p_len):
        # NOTE(review): ``i < p_len`` is always true inside this loop since
        # i ranges over range(p_len); the guard is harmless but redundant.
        a = ord(provided[i]) if i < p_len else 0
        b = ord(known[i]) if i < k_len else 0
        result |= a ^ b
    return (p_len == k_len) & (result == 0)


def setup_remote_pydev_debug():
    """Attach to a remote pydev debugger if configured via CONF."""
    if CONF.pydev_debug_host and CONF.pydev_debug_port:
        try:
            try:
                from pydev import pydevd
            except ImportError:
                import pydevd

            pydevd.settrace(CONF.pydev_debug_host,
                            port=CONF.pydev_debug_port,
                            stdoutToServer=True,
                            stderrToServer=True)
            return True
        except Exception:
            LOG.exception(_LE(
                'Error setting up the debug environment. Verify that the '
                'option --debug-url has the format : and that a '
                'debugger processes is listening on that port.'))
            raise


def get_unix_user(user=None):
    """Get the uid and user name.

    This is a convenience utility which accepts a variety of input
    which might represent a unix user. If successful it returns the uid
    and name. Valid input is:

    string
        A string is first considered to be a user name and a lookup is
        attempted under that name. If no name is found then an attempt
        is made to convert the string to an integer and perform a
        lookup as a uid.

    int
        An integer is interpreted as a uid.

    None
        None is interpreted to mean use the current process's
        effective user.

    If the input is a valid type but no user is found a KeyError is
    raised. If the input is not a valid type a TypeError is raised.

    :param object user: string, int or None specifying the user to
                        lookup.

    :returns: tuple of (uid, name)

    """
    if isinstance(user, six.string_types):
        try:
            user_info = pwd.getpwnam(user)
        except KeyError:
            try:
                i = int(user)
            except ValueError:
                raise KeyError("user name '%s' not found" % user)
            try:
                user_info = pwd.getpwuid(i)
            except KeyError:
                raise KeyError("user id %d not found" % i)
    elif isinstance(user, int):
        try:
            user_info = pwd.getpwuid(user)
        except KeyError:
            raise KeyError("user id %d not found" % user)
    elif user is None:
        user_info = pwd.getpwuid(os.geteuid())
    else:
        user_cls_name = reflection.get_class_name(user,
                                                  fully_qualified=False)
        raise TypeError('user must be string, int or None; not %s (%r)'
                        % (user_cls_name, user))

    return user_info.pw_uid, user_info.pw_name


def get_unix_group(group=None):
    """Get the gid and group name.

    This is a convenience utility which accepts a variety of input
    which might represent a unix group. If successful it returns the gid
    and name. Valid input is:

    string
        A string is first considered to be a group name and a lookup is
        attempted under that name. If no name is found then an attempt
        is made to convert the string to an integer and perform a
        lookup as a gid.

    int
        An integer is interpreted as a gid.

    None
        None is interpreted to mean use the current process's
        effective group.

    If the input is a valid type but no group is found a KeyError is
    raised. If the input is not a valid type a TypeError is raised.

    :param object group: string, int or None specifying the group to
                         lookup.

    :returns: tuple of (gid, name)

    """
    if isinstance(group, six.string_types):
        try:
            group_info = grp.getgrnam(group)
        except KeyError:
            # Was an int passed as a string?
            # Try converting to int and lookup by id instead.
            try:
                i = int(group)
            except ValueError:
                raise KeyError("group name '%s' not found" % group)
            try:
                group_info = grp.getgrgid(i)
            except KeyError:
                raise KeyError("group id %d not found" % i)
    elif isinstance(group, int):
        try:
            group_info = grp.getgrgid(group)
        except KeyError:
            raise KeyError("group id %d not found" % group)
    elif group is None:
        group_info = grp.getgrgid(os.getegid())
    else:
        group_cls_name = reflection.get_class_name(group,
                                                   fully_qualified=False)
        raise TypeError('group must be string, int or None; not %s (%r)'
                        % (group_cls_name, group))

    return group_info.gr_gid, group_info.gr_name


def set_permissions(path, mode=None, user=None, group=None, log=None):
    """Set the ownership and permissions on the pathname.

    Each of the mode, user and group are optional, if None then
    that aspect is not modified.

    Owner and group may be specified either with a symbolic name
    or numeric id.

    :param string path: Pathname of directory whose existence is assured.
    :param object mode: ownership permissions flags (int) i.e. chmod,
                        if None do not set.
    :param object user: set user, name (string) or uid (integer),
                        if None do not set.
    :param object group: set group, name (string) or gid (integer)
                         if None do not set.
    :param logger log: logging.logger object, used to emit log messages,
                       if None no logging is performed.

    """
    if user is None:
        user_uid, user_name = None, None
    else:
        user_uid, user_name = get_unix_user(user)

    if group is None:
        group_gid, group_name = None, None
    else:
        group_gid, group_name = get_unix_group(group)

    if log:
        if mode is None:
            mode_string = str(mode)
        else:
            mode_string = oct(mode)
        log.debug("set_permissions: "
                  "path='%s' mode=%s user=%s(%s) group=%s(%s)",
                  path, mode_string,
                  user_name, user_uid, group_name, group_gid)

    # Change user and group if specified
    if user_uid is not None or group_gid is not None:
        if user_uid is None:
            user_uid = -1
        if group_gid is None:
            group_gid = -1
        try:
            os.chown(path, user_uid, group_gid)
        except OSError as exc:
            raise EnvironmentError("chown('%s', %s, %s): %s" %
                                   (path,
                                    user_name, group_name,
                                    exc.strerror))

    # Change permission flags
    if mode is not None:
        try:
            os.chmod(path, mode)
        except OSError as exc:
            raise EnvironmentError("chmod('%s', %#o): %s" %
                                   (path, mode, exc.strerror))


def make_dirs(path, mode=None, user=None, group=None, log=None):
    """Assure directory exists, set ownership and permissions.

    Assure the directory exists and optionally set its ownership
    and permissions.

    Each of the mode, user and group are optional, if None then
    that aspect is not modified.

    Owner and group may be specified either with a symbolic name
    or numeric id.

    :param string path: Pathname of directory whose existence is assured.
    :param object mode: ownership permissions flags (int) i.e. chmod,
                        if None do not set.
    :param object user: set user, name (string) or uid (integer),
                        if None do not set.
    :param object group: set group, name (string) or gid (integer)
                         if None do not set.
    :param logger log: logging.logger object, used to emit log messages,
                       if None no logging is performed.

    """
    if log:
        if mode is None:
            mode_string = str(mode)
        else:
            mode_string = oct(mode)
        log.debug("make_dirs path='%s' mode=%s user=%s group=%s",
                  path, mode_string, user, group)

    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError as exc:
            raise EnvironmentError(
                "makedirs('%s'): %s" % (path, exc.strerror))

    set_permissions(path, mode, user, group, log)


class WhiteListedItemFilter(object):
    """Mapping wrapper that only exposes a whitelisted set of keys."""

    def __init__(self, whitelist, data):
        self._whitelist = set(whitelist or [])
        self._data = data

    def __getitem__(self, name):
        if name not in self._whitelist:
            raise KeyError
        return self._data[name]


_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'


def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format."""
    # Python provides a similar instance method for datetime.datetime objects
    # called isoformat(). The format of the strings generated by isoformat()
    # have a couple of problems:
    # 1) The strings generated by isotime are used in tokens and other public
    #    APIs that we can't change without a deprecation period. The strings
    #    generated by isoformat are not the same format, so we can't just
    #    change to it.
    # 2) The strings generated by isoformat do not include the microseconds if
    #    the value happens to be 0. This will likely show up as random failures
    #    as parsers may be written to always expect microseconds, and it will
    #    parse correctly most of the time.

    if not at:
        at = timeutils.utcnow()
    st = at.strftime(_ISO8601_TIME_FORMAT
                     if not subsecond
                     else _ISO8601_TIME_FORMAT_SUBSECOND)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    # Collapse the UTC tz name to the compact 'Z' suffix.
    st += ('Z' if tz == 'UTC' else tz)
    return st


def strtime():
    """Return the current UTC time in oslo's PERFECT_TIME_FORMAT."""
    at = timeutils.utcnow()
    return at.strftime(timeutils.PERFECT_TIME_FORMAT)


def get_token_ref(context):
    """Retrieves KeystoneToken object from the auth context and returns it.

    :param dict context: The request context.
    :raises keystone.exception.Unauthorized: If auth context cannot be found.
    :returns: The KeystoneToken object.
    """
    try:
        # Retrieve the auth context that was prepared by AuthContextMiddleware.
        auth_context = (context['environment']
                        [authorization.AUTH_CONTEXT_ENV])
        return auth_context['token']
    except KeyError:
        LOG.warning(_LW("Couldn't find the auth context."))
        raise exception.Unauthorized()


URL_RESERVED_CHARS = ":/?#[]@!$&'()*+,;="


def is_not_url_safe(name):
    """Check if a string contains any url reserved characters."""
    return len(list_url_unsafe_chars(name)) > 0


def list_url_unsafe_chars(name):
    """Return a list of the reserved characters."""
    # NOTE(review): despite the name/docstring this returns a *string* of the
    # offending characters, which is all is_not_url_safe() needs.
    reserved_chars = ''
    for i in name:
        if i in URL_RESERVED_CHARS:
            reserved_chars += i
    return reserved_chars


def lower_case_hostname(url):
    """Change the URL's hostname to lowercase"""
    # NOTE(gyee): according to
    # https://www.w3.org/TR/WD-html40-970708/htmlweb.html, the netloc portion
    # of the URL is case-insensitive
    parsed = moves.urllib.parse.urlparse(url)
    # Note: _replace method for named tuples is public and defined in docs
    replaced = parsed._replace(netloc=parsed.netloc.lower())
    return moves.urllib.parse.urlunparse(replaced)


def remove_standard_port(url):
    """Strip the RFC 2616/2818 default port (80/443) from a URL's netloc."""
    # remove the default ports specified in RFC2616 and 2818
    o = moves.urllib.parse.urlparse(url)
    # NOTE(review): this initial assignment is dead — partition() below
    # immediately rebinds ``separator``.
    separator = ':'
    (host, separator, port) = o.netloc.partition(':')
    if o.scheme.lower() == 'http' and port == '80':
        # NOTE(gyee): _replace() is not a private method. It has an
        # an underscore prefix to prevent conflict with field names.
        # See https://docs.python.org/2/library/collections.html#
        # collections.namedtuple
        o = o._replace(netloc=host)
    if o.scheme.lower() == 'https' and port == '443':
        o = o._replace(netloc=host)

    return moves.urllib.parse.urlunparse(o)
keystone-9.0.0/keystone/common/__init__.py0000664000567000056710000000000012701407102021744 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/tokenless_auth.py0000664000567000056710000001751212701407102023255 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib from oslo_config import cfg from oslo_log import log from keystone.auth import controllers from keystone.common import dependency from keystone import exception from keystone.federation import constants as federation_constants from keystone.federation import utils from keystone.i18n import _ CONF = cfg.CONF LOG = log.getLogger(__name__) @dependency.requires('assignment_api', 'federation_api', 'identity_api', 'resource_api') class TokenlessAuthHelper(object): def __init__(self, env): """A init class for TokenlessAuthHelper. :param env: The HTTP request environment that should contain client certificate attributes. These attributes should match with what the mapping defines. Or a user cannot be mapped and results un-authenticated. The following examples are for the attributes that reference to the client certificate's Subject's Common Name and Organization: SSL_CLIENT_S_DN_CN, SSL_CLIENT_S_DN_O :type env: dict """ self.env = env def _build_scope_info(self): """Build the token request scope based on the headers. 
:returns: scope data :rtype: dict """ project_id = self.env.get('HTTP_X_PROJECT_ID') project_name = self.env.get('HTTP_X_PROJECT_NAME') project_domain_id = self.env.get('HTTP_X_PROJECT_DOMAIN_ID') project_domain_name = self.env.get('HTTP_X_PROJECT_DOMAIN_NAME') domain_id = self.env.get('HTTP_X_DOMAIN_ID') domain_name = self.env.get('HTTP_X_DOMAIN_NAME') scope = {} if project_id: scope['project'] = {'id': project_id} elif project_name: scope['project'] = {'name': project_name} if project_domain_id: scope['project']['domain'] = {'id': project_domain_id} elif project_domain_name: scope['project']['domain'] = {'name': project_domain_name} else: msg = _('Neither Project Domain ID nor Project Domain Name ' 'was provided.') raise exception.ValidationError(msg) elif domain_id: scope['domain'] = {'id': domain_id} elif domain_name: scope['domain'] = {'name': domain_name} else: raise exception.ValidationError( attribute='project or domain', target='scope') return scope def get_scope(self): auth = {} # NOTE(chioleong): Auth methods here are insignificant because # we only care about using auth.controllers.AuthInfo # to validate the scope information. Therefore, # we don't provide any identity. auth['scope'] = self._build_scope_info() # NOTE(chioleong): We'll let AuthInfo validate the scope for us auth_info = controllers.AuthInfo.create({}, auth, scope_only=True) return auth_info.get_scope() def get_mapped_user(self, project_id=None, domain_id=None): """Map client certificate to an existing user. If user is ephemeral, there is no validation on the user himself; however it will be mapped to a corresponding group(s) and the scope of this ephemeral user is the same as what is assigned to the group. :param project_id: Project scope of the mapped user. :param domain_id: Domain scope of the mapped user. 
:returns: A dictionary that contains the keys, such as user_id, user_name, domain_id, domain_name :rtype: dict """ idp_id = self._build_idp_id() LOG.debug('The IdP Id %s and protocol Id %s are used to look up ' 'the mapping.', idp_id, CONF.tokenless_auth.protocol) mapped_properties, mapping_id = self.federation_api.evaluate( idp_id, CONF.tokenless_auth.protocol, self.env) user = mapped_properties.get('user', {}) user_id = user.get('id') user_name = user.get('name') user_type = user.get('type') if user.get('domain') is not None: user_domain_id = user.get('domain').get('id') user_domain_name = user.get('domain').get('name') else: user_domain_id = None user_domain_name = None # if user is ephemeral type, we don't care if the user exists # or not, but just care if the mapped group(s) is valid. if user_type == utils.UserType.EPHEMERAL: user_ref = {'type': utils.UserType.EPHEMERAL} group_ids = mapped_properties['group_ids'] utils.validate_groups_in_backend(group_ids, mapping_id, self.identity_api) group_ids.extend( utils.transform_to_group_ids( mapped_properties['group_names'], mapping_id, self.identity_api, self.assignment_api)) roles = self.assignment_api.get_roles_for_groups(group_ids, project_id, domain_id) if roles is not None: role_names = [role['name'] for role in roles] user_ref['roles'] = role_names user_ref['group_ids'] = list(group_ids) user_ref[federation_constants.IDENTITY_PROVIDER] = idp_id user_ref[federation_constants.PROTOCOL] = ( CONF.tokenless_auth.protocol) return user_ref if user_id: user_ref = self.identity_api.get_user(user_id) elif user_name and (user_domain_name or user_domain_id): if user_domain_name: user_domain = self.resource_api.get_domain_by_name( user_domain_name) self.resource_api.assert_domain_enabled(user_domain['id'], user_domain) user_domain_id = user_domain['id'] user_ref = self.identity_api.get_user_by_name(user_name, user_domain_id) else: msg = _('User auth cannot be built due to missing either ' 'user id, or user name with domain 
id, or user name ' 'with domain name.') raise exception.ValidationError(msg) self.identity_api.assert_user_enabled( user_id=user_ref['id'], user=user_ref) user_ref['type'] = utils.UserType.LOCAL return user_ref def _build_idp_id(self): """Build the IdP name from the given config option issuer_attribute. The default issuer attribute SSL_CLIENT_I_DN in the environment is built with the following formula - base64_idp = sha1(env['SSL_CLIENT_I_DN']) :returns: base64_idp like the above example :rtype: str """ idp = self.env.get(CONF.tokenless_auth.issuer_attribute) if idp is None: raise exception.TokenlessAuthConfigError( issuer_attribute=CONF.tokenless_auth.issuer_attribute) hashed_idp = hashlib.sha256(idp.encode('utf-8')) return hashed_idp.hexdigest() keystone-9.0.0/keystone/common/wsgi.py0000664000567000056710000007563112701407102021204 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utility methods for working with WSGI servers.""" import copy import itertools import re import wsgiref.util from oslo_config import cfg import oslo_i18n from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import strutils import routes.middleware import six import webob.dec import webob.exc from keystone.common import dependency from keystone.common import json_home from keystone.common import utils from keystone import exception from keystone.i18n import _ from keystone.i18n import _LI from keystone.i18n import _LW from keystone.models import token_model CONF = cfg.CONF LOG = log.getLogger(__name__) # Environment variable used to pass the request context CONTEXT_ENV = 'openstack.context' # Environment variable used to pass the request params PARAMS_ENV = 'openstack.params' JSON_ENCODE_CONTENT_TYPES = set(['application/json', 'application/json-home']) def validate_token_bind(context, token_ref): bind_mode = CONF.token.enforce_token_bind if bind_mode == 'disabled': return if not isinstance(token_ref, token_model.KeystoneToken): raise exception.UnexpectedError(_('token reference must be a ' 'KeystoneToken type, got: %s') % type(token_ref)) bind = token_ref.bind # permissive and strict modes don't require there to be a bind permissive = bind_mode in ('permissive', 'strict') if not bind: if permissive: # no bind provided and none required return else: LOG.info(_LI("No bind information present in token")) raise exception.Unauthorized() # get the named mode if bind_mode is not one of the known name = None if permissive or bind_mode == 'required' else bind_mode if name and name not in bind: LOG.info(_LI("Named bind mode %s not in bind information"), name) raise exception.Unauthorized() for bind_type, identifier in bind.items(): if bind_type == 'kerberos': if not (context['environment'].get('AUTH_TYPE', '').lower() == 'negotiate'): LOG.info(_LI("Kerberos credentials required and not present")) raise 
exception.Unauthorized() if not context['environment'].get('REMOTE_USER') == identifier: LOG.info(_LI("Kerberos credentials do not match " "those in bind")) raise exception.Unauthorized() LOG.info(_LI("Kerberos bind authentication successful")) elif bind_mode == 'permissive': LOG.debug(("Ignoring unknown bind for permissive mode: " "{%(bind_type)s: %(identifier)s}"), {'bind_type': bind_type, 'identifier': identifier}) else: LOG.info(_LI("Couldn't verify unknown bind: " "{%(bind_type)s: %(identifier)s}"), {'bind_type': bind_type, 'identifier': identifier}) raise exception.Unauthorized() def best_match_language(req): """Determines the best available locale. This returns best available locale based on the Accept-Language HTTP header passed in the request. """ if not req.accept_language: return None return req.accept_language.best_match( oslo_i18n.get_available_languages('keystone')) class BaseApplication(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [app:wadl] latest_version = 1.3 paste.app_factory = keystone.fancy_api:Wadl.factory which would result in a call to the `Wadl` class as import keystone.fancy_api keystone.fancy_api.Wadl(latest_version='1.3') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. 
""" return cls(**local_config) def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @webob.dec.wsgify() def __call__(self, req): # Any of the following objects work as responses: # Option 1: simple string res = 'message\n' # Option 2: a nicely formatted HTTP exception page res = exc.HTTPForbidden(explanation='Nice try') # Option 3: a webob Response object (in case you need to play with # headers, or you want to be treated like an iterable, or or or) res = Response(); res.app_iter = open('somefile') # Option 4: any wsgi app to be run next res = self.application # Option 5: you can get a Response object for a wsgi app, too, to # play with headers etc res = req.get_response(self.application) # You can then just return your response... return res # ... or set req.response and return None. req.response = res See the end of http://pythonpaste.org/webob/modules/dec.html for more info. """ raise NotImplementedError('You must implement __call__') @dependency.requires('assignment_api', 'policy_api', 'token_provider_api') class Application(BaseApplication): @webob.dec.wsgify() def __call__(self, req): arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict.pop('action') del arg_dict['controller'] # allow middleware up the stack to provide context, params and headers. context = req.environ.get(CONTEXT_ENV, {}) try: context['query_string'] = dict(req.params.items()) except UnicodeDecodeError as e: # The webob package throws UnicodeError when a request cannot be # decoded. Raise ValidationError instead to avoid an UnknownError. msg = _('Query string is not UTF-8 encoded') raise exception.ValidationError(msg) context['headers'] = dict(req.headers.items()) context['path'] = req.environ['PATH_INFO'] scheme = req.environ.get(CONF.secure_proxy_ssl_header) if scheme: # NOTE(andrey-mp): "wsgi.url_scheme" contains the protocol used # before the proxy removed it ('https' usually). 
So if # the webob.Request instance is modified in order to use this # scheme instead of the one defined by API, the call to # webob.Request.relative_url() will return a URL with the correct # scheme. req.environ['wsgi.url_scheme'] = scheme context['host_url'] = req.host_url params = req.environ.get(PARAMS_ENV, {}) # authentication and authorization attributes are set as environment # values by the container and processed by the pipeline. The complete # set is not yet known. context['environment'] = req.environ context['accept_header'] = req.accept req.environ = None params.update(arg_dict) context.setdefault('is_admin', False) # TODO(termie): do some basic normalization on methods method = getattr(self, action) # NOTE(morganfainberg): use the request method to normalize the # response code between GET and HEAD requests. The HTTP status should # be the same. LOG.info('%(req_method)s %(uri)s', { 'req_method': req.environ['REQUEST_METHOD'].upper(), 'uri': wsgiref.util.request_uri(req.environ), }) params = self._normalize_dict(params) try: result = method(context, **params) except exception.Unauthorized as e: LOG.warning( _LW("Authorization failed. 
%(exception)s from " "%(remote_addr)s"), {'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']}) return render_exception(e, context=context, user_locale=best_match_language(req)) except exception.Error as e: LOG.warning(six.text_type(e)) return render_exception(e, context=context, user_locale=best_match_language(req)) except TypeError as e: LOG.exception(six.text_type(e)) return render_exception(exception.ValidationError(e), context=context, user_locale=best_match_language(req)) except Exception as e: LOG.exception(six.text_type(e)) return render_exception(exception.UnexpectedError(exception=e), context=context, user_locale=best_match_language(req)) if result is None: return render_response(status=(204, 'No Content')) elif isinstance(result, six.string_types): return result elif isinstance(result, webob.Response): return result elif isinstance(result, webob.exc.WSGIHTTPException): return result response_code = self._get_response_code(req) return render_response(body=result, status=response_code, method=req.environ['REQUEST_METHOD']) def _get_response_code(self, req): req_method = req.environ['REQUEST_METHOD'] controller = importutils.import_class('keystone.common.controller') code = None if isinstance(self, controller.V3Controller) and req_method == 'POST': code = (201, 'Created') return code def _normalize_arg(self, arg): return arg.replace(':', '_').replace('-', '_') def _normalize_dict(self, d): return {self._normalize_arg(k): v for (k, v) in d.items()} def assert_admin(self, context): """Ensure the user is an admin. :raises keystone.exception.Unauthorized: if a token could not be found/authorized, a user is invalid, or a tenant is invalid/not scoped. 
:raises keystone.exception.Forbidden: if the user is not an admin and does not have the admin role """ if not context['is_admin']: user_token_ref = utils.get_token_ref(context) validate_token_bind(context, user_token_ref) creds = copy.deepcopy(user_token_ref.metadata) try: creds['user_id'] = user_token_ref.user_id except exception.UnexpectedError: LOG.debug('Invalid user') raise exception.Unauthorized() if user_token_ref.project_scoped: creds['tenant_id'] = user_token_ref.project_id else: LOG.debug('Invalid tenant') raise exception.Unauthorized() creds['roles'] = user_token_ref.role_names # Accept either is_admin or the admin role self.policy_api.enforce(creds, 'admin_required', {}) def _attribute_is_empty(self, ref, attribute): """Determine if the attribute in ref is empty or None.""" return ref.get(attribute) is None or ref.get(attribute) == '' def _require_attribute(self, ref, attribute): """Ensures the reference contains the specified attribute. Raise a ValidationError if the given attribute is not present """ if self._attribute_is_empty(ref, attribute): msg = _('%s field is required and cannot be empty') % attribute raise exception.ValidationError(message=msg) def _require_attributes(self, ref, attrs): """Ensures the reference contains the specified attributes. Raise a ValidationError if any of the given attributes is not present """ missing_attrs = [attribute for attribute in attrs if self._attribute_is_empty(ref, attribute)] if missing_attrs: msg = _('%s field(s) cannot be empty') % ', '.join(missing_attrs) raise exception.ValidationError(message=msg) def _get_trust_id_for_request(self, context): """Get the trust_id for a call. 
Retrieve the trust_id from the token Returns None if token is not trust scoped """ if ('token_id' not in context or context.get('token_id') == CONF.admin_token): LOG.debug(('will not lookup trust as the request auth token is ' 'either absent or it is the system admin token')) return None token_ref = utils.get_token_ref(context) return token_ref.trust_id @classmethod def base_url(cls, context, endpoint_type): url = CONF['%s_endpoint' % endpoint_type] if url: substitutions = dict( itertools.chain(CONF.items(), CONF.eventlet_server.items())) url = url % substitutions elif 'environment' in context: url = wsgiref.util.application_uri(context['environment']) # remove version from the URL as it may be part of SCRIPT_NAME but # it should not be part of base URL url = re.sub(r'/v(3|(2\.0))/*$', '', url) # now remove the standard port url = utils.remove_standard_port(url) else: # if we don't have enough information to come up with a base URL, # then fall back to localhost. This should never happen in # production environment. url = 'http://localhost:%d' % CONF.eventlet_server.public_port return url.rstrip('/') class Middleware(Application): """Base WSGI middleware. These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ @classmethod def factory(cls, global_config): """Used for paste app factories in paste.deploy config files.""" def _factory(app): return cls(app) return _factory def __init__(self, application): super(Middleware, self).__init__() self.application = application def process_request(self, request): """Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. 
""" return None def process_response(self, request, response): """Do whatever you'd like to the response, based on the request.""" return response @webob.dec.wsgify() def __call__(self, request): try: response = self.process_request(request) if response: return response response = request.get_response(self.application) return self.process_response(request, response) except exception.Error as e: LOG.warning(six.text_type(e)) return render_exception(e, request=request, user_locale=best_match_language(request)) except TypeError as e: LOG.exception(six.text_type(e)) return render_exception(exception.ValidationError(e), request=request, user_locale=best_match_language(request)) except Exception as e: LOG.exception(six.text_type(e)) return render_exception(exception.UnexpectedError(exception=e), request=request, user_locale=best_match_language(request)) class Debug(Middleware): """Helper class for debugging a WSGI application. Can be inserted into any WSGI application chain to get information about the request and response. 
""" @webob.dec.wsgify() def __call__(self, req): if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug): LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20)) for key, value in req.environ.items(): LOG.debug('%s = %s', key, strutils.mask_password(value)) LOG.debug('') LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20)) for line in req.body_file: LOG.debug('%s', strutils.mask_password(line)) LOG.debug('') resp = req.get_response(self.application) if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug): LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20)) for (key, value) in resp.headers.items(): LOG.debug('%s = %s', key, value) LOG.debug('') resp.app_iter = self.print_generator(resp.app_iter) return resp @staticmethod def print_generator(app_iter): """Iterator that prints the contents of a wrapper string.""" LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20)) for part in app_iter: LOG.debug(part) yield part class Router(object): """WSGI middleware that maps incoming requests to WSGI apps.""" def __init__(self, mapper): """Create a router for the given routes.Mapper. Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as well and have your controller be an object that can route the request to the action-specific method. Examples: mapper = routes.Mapper() sc = ServerController() # Explicit mapping of one route to a controller+action mapper.connect(None, '/svrlist', controller=sc, action='list') # Actions are all implicitly defined mapper.resource('server', 'servers', controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. 
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) """ self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @webob.dec.wsgify() def __call__(self, req): """Route the incoming request to a controller based on self.map. If no match, return a 404. """ return self._router @staticmethod @webob.dec.wsgify() def _dispatch(req): """Dispatch the request to the appropriate controller. Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. """ match = req.environ['wsgiorg.routing_args'][1] if not match: msg = _('The resource could not be found.') return render_exception(exception.NotFound(msg), request=req, user_locale=best_match_language(req)) app = match['controller'] return app class ComposingRouter(Router): def __init__(self, mapper=None, routers=None): if mapper is None: mapper = routes.Mapper() if routers is None: routers = [] for router in routers: router.add_routes(mapper) super(ComposingRouter, self).__init__(mapper) class ComposableRouter(Router): """Router that supports use by ComposingRouter.""" def __init__(self, mapper=None): if mapper is None: mapper = routes.Mapper() self.add_routes(mapper) super(ComposableRouter, self).__init__(mapper) def add_routes(self, mapper): """Add routes to given mapper.""" pass class ExtensionRouter(Router): """A router that allows extensions to supplement or overwrite routes. Expects to be subclassed. """ def __init__(self, application, mapper=None): if mapper is None: mapper = routes.Mapper() self.application = application self.add_routes(mapper) mapper.connect('/{path_info:.*}', controller=self.application) super(ExtensionRouter, self).__init__(mapper) def add_routes(self, mapper): pass @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. 
Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [filter:analytics] redis_host = 127.0.0.1 paste.filter_factory = keystone.analytics:Analytics.factory which would result in a call to the `Analytics` class as import keystone.analytics keystone.analytics.Analytics(app, redis_host='127.0.0.1') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ def _factory(app): conf = global_config.copy() conf.update(local_config) return cls(app, **local_config) return _factory class RoutersBase(object): """Base class for Routers.""" def __init__(self): self.v3_resources = [] def append_v3_routers(self, mapper, routers): """Append v3 routers. Subclasses should override this method to map its routes. Use self._add_resource() to map routes for a resource. """ def _add_resource(self, mapper, controller, path, rel, get_action=None, head_action=None, get_head_action=None, put_action=None, post_action=None, patch_action=None, delete_action=None, get_post_action=None, path_vars=None, status=json_home.Status.STABLE, new_path=None): if get_head_action: getattr(controller, get_head_action) # ensure the attribute exists mapper.connect(path, controller=controller, action=get_head_action, conditions=dict(method=['GET', 'HEAD'])) if get_action: getattr(controller, get_action) # ensure the attribute exists mapper.connect(path, controller=controller, action=get_action, conditions=dict(method=['GET'])) if head_action: getattr(controller, head_action) # ensure the attribute exists mapper.connect(path, controller=controller, action=head_action, conditions=dict(method=['HEAD'])) if put_action: getattr(controller, put_action) # ensure the attribute exists mapper.connect(path, controller=controller, action=put_action, conditions=dict(method=['PUT'])) if post_action: getattr(controller, 
post_action) # ensure the attribute exists mapper.connect(path, controller=controller, action=post_action, conditions=dict(method=['POST'])) if patch_action: getattr(controller, patch_action) # ensure the attribute exists mapper.connect(path, controller=controller, action=patch_action, conditions=dict(method=['PATCH'])) if delete_action: getattr(controller, delete_action) # ensure the attribute exists mapper.connect(path, controller=controller, action=delete_action, conditions=dict(method=['DELETE'])) if get_post_action: getattr(controller, get_post_action) # ensure the attribute exists mapper.connect(path, controller=controller, action=get_post_action, conditions=dict(method=['GET', 'POST'])) resource_data = dict() if path_vars: resource_data['href-template'] = new_path or path resource_data['href-vars'] = path_vars else: resource_data['href'] = new_path or path json_home.Status.update_resource_data(resource_data, status) self.v3_resources.append((rel, resource_data)) class V3ExtensionRouter(ExtensionRouter, RoutersBase): """Base class for V3 extension router.""" def __init__(self, application, mapper=None): self.v3_resources = list() super(V3ExtensionRouter, self).__init__(application, mapper) def _update_version_response(self, response_data): response_data['resources'].update(self.v3_resources) @webob.dec.wsgify() def __call__(self, request): if request.path_info != '/': # Not a request for version info so forward to super. return super(V3ExtensionRouter, self).__call__(request) response = request.get_response(self.application) if response.status_code != 200: # The request failed, so don't update the response. return response if response.headers['Content-Type'] != 'application/json-home': # Not a request for JSON Home document, so don't update the # response. 
return response response_data = jsonutils.loads(response.body) self._update_version_response(response_data) response.body = jsonutils.dump_as_bytes(response_data, cls=utils.SmarterEncoder) return response def render_response(body=None, status=None, headers=None, method=None): """Forms a WSGI response.""" if headers is None: headers = [] else: headers = list(headers) headers.append(('Vary', 'X-Auth-Token')) if body is None: body = b'' status = status or (204, 'No Content') else: content_types = [v for h, v in headers if h == 'Content-Type'] if content_types: content_type = content_types[0] else: content_type = None if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES: body = jsonutils.dump_as_bytes(body, cls=utils.SmarterEncoder) if content_type is None: headers.append(('Content-Type', 'application/json')) status = status or (200, 'OK') # NOTE(davechen): `mod_wsgi` follows the standards from pep-3333 and # requires the value in response header to be binary type(str) on python2, # unicode based string(str) on python3, or else keystone will not work # under apache with `mod_wsgi`. # keystone needs to check the data type of each header and convert the # type if needed. # see bug: # https://bugs.launchpad.net/keystone/+bug/1528981 # see pep-3333: # https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types # see source from mod_wsgi: # https://github.com/GrahamDumpleton/mod_wsgi(methods: # wsgi_convert_headers_to_bytes(...), wsgi_convert_string_to_bytes(...) # and wsgi_validate_header_value(...)). def _convert_to_str(headers): str_headers = [] for header in headers: str_header = [] for value in header: if not isinstance(value, str): str_header.append(str(value)) else: str_header.append(value) # convert the list to the immutable tuple to build the headers. # header's key/value will be guaranteed to be str type. 
str_headers.append(tuple(str_header)) return str_headers headers = _convert_to_str(headers) resp = webob.Response(body=body, status='%s %s' % status, headerlist=headers) if method and method.upper() == 'HEAD': # NOTE(morganfainberg): HEAD requests should return the same status # as a GET request and same headers (including content-type and # content-length). The webob.Response object automatically changes # content-length (and other headers) if the body is set to b''. Capture # all headers and reset them on the response object after clearing the # body. The body can only be set to a binary-type (not TextType or # NoneType), so b'' is used here and should be compatible with # both py2x and py3x. stored_headers = resp.headers.copy() resp.body = b'' for header, value in stored_headers.items(): resp.headers[header] = value return resp def render_exception(error, context=None, request=None, user_locale=None): """Forms a WSGI response based on the current error.""" error_message = error.args[0] message = oslo_i18n.translate(error_message, desired_locale=user_locale) if message is error_message: # translate() didn't do anything because it wasn't a Message, # convert to a string. message = six.text_type(message) body = {'error': { 'code': error.code, 'title': error.title, 'message': message, }} headers = [] if isinstance(error, exception.AuthPluginException): body['error']['identity'] = error.authentication elif isinstance(error, exception.Unauthorized): # NOTE(gyee): we only care about the request environment in the # context. 
Also, its OK to pass the environemt as it is read-only in # Application.base_url() local_context = {} if request: local_context = {'environment': request.environ} elif context and 'environment' in context: local_context = {'environment': context['environment']} url = Application.base_url(local_context, 'public') headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url)) return render_response(status=(error.code, error.title), body=body, headers=headers) keystone-9.0.0/keystone/common/clean.py0000664000567000056710000000526512701407102021311 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import six from keystone import exception from keystone.i18n import _ def check_length(property_name, value, min_length=1, max_length=64): if len(value) < min_length: if min_length == 1: msg = _("%s cannot be empty.") % property_name else: msg = (_("%(property_name)s cannot be less than " "%(min_length)s characters.") % dict( property_name=property_name, min_length=min_length)) raise exception.ValidationError(msg) if len(value) > max_length: msg = (_("%(property_name)s should not be greater than " "%(max_length)s characters.") % dict( property_name=property_name, max_length=max_length)) raise exception.ValidationError(msg) def check_type(property_name, value, expected_type, display_expected_type): if not isinstance(value, expected_type): msg = (_("%(property_name)s is not a " "%(display_expected_type)s") % dict( property_name=property_name, display_expected_type=display_expected_type)) raise exception.ValidationError(msg) def check_enabled(property_name, enabled): # Allow int and it's subclass bool check_type('%s enabled' % property_name, enabled, int, 'boolean') return bool(enabled) def check_name(property_name, name, min_length=1, max_length=64): check_type('%s name' % property_name, name, six.string_types, 'str or unicode') name = name.strip() check_length('%s name' % property_name, name, min_length=min_length, max_length=max_length) return name def domain_name(name): return check_name('Domain', name) def domain_enabled(enabled): return check_enabled('Domain', enabled) def project_name(name): return check_name('Project', name) def project_enabled(enabled): return check_enabled('Project', enabled) def user_name(name): return check_name('User', name, max_length=255) def user_enabled(enabled): return check_enabled('User', enabled) def group_name(name): return check_name('Group', name) keystone-9.0.0/keystone/common/manager.py0000664000567000056710000001764712701407102021650 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under 
the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import inspect import time import types from oslo_log import log from oslo_log import versionutils from oslo_utils import importutils from oslo_utils import reflection import six import stevedore from keystone.i18n import _ LOG = log.getLogger(__name__) def response_truncated(f): """Truncate the list returned by the wrapped function. This is designed to wrap Manager list_{entity} methods to ensure that any list limits that are defined are passed to the driver layer. If a hints list is provided, the wrapper will insert the relevant limit into the hints so that the underlying driver call can try and honor it. If the driver does truncate the response, it will update the 'truncated' attribute in the 'limit' entry in the hints list, which enables the caller of this function to know if truncation has taken place. If, however, the driver layer is unable to perform truncation, the 'limit' entry is simply left in the hints list for the caller to handle. A _get_list_limit() method is required to be present in the object class hierarchy, which returns the limit for this backend to which we will truncate. If a hints list is not provided in the arguments of the wrapped call then any limits set in the config file are ignored. This allows internal use of such wrapped methods where the entire data set is needed as input for the calculations of some other API (e.g. get role assignments for a given project). 
""" @functools.wraps(f) def wrapper(self, *args, **kwargs): if kwargs.get('hints') is None: return f(self, *args, **kwargs) list_limit = self.driver._get_list_limit() if list_limit: kwargs['hints'].set_limit(list_limit) return f(self, *args, **kwargs) return wrapper def load_driver(namespace, driver_name, *args): try: driver_manager = stevedore.DriverManager(namespace, driver_name, invoke_on_load=True, invoke_args=args) return driver_manager.driver except RuntimeError as e: LOG.debug('Failed to load %r using stevedore: %s', driver_name, e) # Ignore failure and continue on. driver = importutils.import_object(driver_name, *args) msg = (_( 'Direct import of driver %(name)r is deprecated as of Liberty in ' 'favor of its entrypoint from %(namespace)r and may be removed in ' 'N.') % {'name': driver_name, 'namespace': namespace}) versionutils.report_deprecated_feature(LOG, msg) return driver class _TraceMeta(type): """A metaclass that, in trace mode, will log entry and exit of methods. This metaclass automatically wraps all methods on the class when instantiated with a decorator that will log entry/exit from a method when keystone is run in Trace log level. """ @staticmethod def wrapper(__f, __classname): __argspec = inspect.getargspec(__f) __fn_info = '%(module)s.%(classname)s.%(funcname)s' % { 'module': inspect.getmodule(__f).__name__, 'classname': __classname, 'funcname': __f.__name__ } # NOTE(morganfainberg): Omit "cls" and "self" when printing trace logs # the index can be calculated at wrap time rather than at runtime. 
if __argspec.args and __argspec.args[0] in ('self', 'cls'): __arg_idx = 1 else: __arg_idx = 0 @functools.wraps(__f) def wrapped(*args, **kwargs): __exc = None __t = time.time() __do_trace = LOG.logger.getEffectiveLevel() <= log.TRACE __ret_val = None try: if __do_trace: LOG.trace('CALL => %s', __fn_info) __ret_val = __f(*args, **kwargs) except Exception as e: # nosec __exc = e raise finally: if __do_trace: __subst = { 'run_time': (time.time() - __t), 'passed_args': ', '.join([ ', '.join([repr(a) for a in args[__arg_idx:]]), ', '.join(['%(k)s=%(v)r' % {'k': k, 'v': v} for k, v in kwargs.items()]), ]), 'function': __fn_info, 'exception': __exc, 'ret_val': __ret_val, } if __exc is not None: __msg = ('[%(run_time)ss] %(function)s ' '(%(passed_args)s) => raised ' '%(exception)r') else: # TODO(morganfainberg): find a way to indicate if this # was a cache hit or cache miss. __msg = ('[%(run_time)ss] %(function)s' '(%(passed_args)s) => %(ret_val)r') LOG.trace(__msg, __subst) return __ret_val return wrapped def __new__(meta, classname, bases, class_dict): final_cls_dict = {} for attr_name, attr in class_dict.items(): # NOTE(morganfainberg): only wrap public instances and methods. if (isinstance(attr, types.FunctionType) and not attr_name.startswith('_')): attr = _TraceMeta.wrapper(attr, classname) final_cls_dict[attr_name] = attr return type.__new__(meta, classname, bases, final_cls_dict) @six.add_metaclass(_TraceMeta) class Manager(object): """Base class for intermediary request layer. The Manager layer exists to support additional logic that applies to all or some of the methods exposed by a service that are not specific to the HTTP interface. It also provides a stable entry point to dynamic backends. An example of a probable use case is logging all the calls. 
""" driver_namespace = None def __init__(self, driver_name): self.driver = load_driver(self.driver_namespace, driver_name) def __getattr__(self, name): """Forward calls to the underlying driver.""" f = getattr(self.driver, name) setattr(self, name, f) return f def create_legacy_driver(driver_class): """Helper function to deprecate the original driver classes. The keystone.{subsystem}.Driver classes are deprecated in favor of the new versioned classes. This function creates a new class based on a versioned class and adds a deprecation message when it is used. This will allow existing custom drivers to work when the Driver class is renamed to include a version. Example usage: Driver = create_legacy_driver(CatalogDriverV8) """ module_name = driver_class.__module__ class_name = reflection.get_class_name(driver_class) class Driver(driver_class): @versionutils.deprecated( as_of=versionutils.deprecated.LIBERTY, what='%s.Driver' % module_name, in_favor_of=class_name, remove_in=+2) def __init__(self, *args, **kwargs): super(Driver, self).__init__(*args, **kwargs) return Driver keystone-9.0.0/keystone/common/controller.py0000664000567000056710000010022412701407102022401 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import uuid from oslo_config import cfg from oslo_log import log from oslo_log import versionutils from oslo_utils import strutils import six from keystone.common import authorization from keystone.common import dependency from keystone.common import driver_hints from keystone.common import utils from keystone.common import wsgi from keystone import exception from keystone.i18n import _, _LW from keystone.models import token_model LOG = log.getLogger(__name__) CONF = cfg.CONF def v2_deprecated(f): @six.wraps(f) def wrapper(*args, **kwargs): deprecated = versionutils.deprecated( what=f.__name__ + ' of the v2 API', as_of=versionutils.deprecated.MITAKA, in_favor_of='a similar function in the v3 API', remove_in=+4) return deprecated(f) return wrapper() def v2_ec2_deprecated(f): @six.wraps(f) def wrapper(*args, **kwargs): deprecated = versionutils.deprecated( what=f.__name__ + ' of the v2 EC2 APIs', as_of=versionutils.deprecated.MITAKA, in_favor_of=('a similar function in the v3 Credential APIs'), remove_in=0) return deprecated(f) return wrapper() def v2_auth_deprecated(f): @six.wraps(f) def wrapper(*args, **kwargs): deprecated = versionutils.deprecated( what=f.__name__ + ' of the v2 Authentication APIs', as_of=versionutils.deprecated.MITAKA, in_favor_of=('a similar function in the v3 Authentication APIs'), remove_in=0) return deprecated(f) return wrapper() def _build_policy_check_credentials(self, action, context, kwargs): kwargs_str = ', '.join(['%s=%s' % (k, kwargs[k]) for k in kwargs]) kwargs_str = strutils.mask_password(kwargs_str) LOG.debug('RBAC: Authorizing %(action)s(%(kwargs)s)', { 'action': action, 'kwargs': kwargs_str}) # see if auth context has already been created. If so use it. 
if ('environment' in context and authorization.AUTH_CONTEXT_ENV in context['environment']): LOG.debug('RBAC: using auth context from the request environment') return context['environment'].get(authorization.AUTH_CONTEXT_ENV) # There is no current auth context, build it from the incoming token. # TODO(morganfainberg): Collapse this logic with AuthContextMiddleware # in a sane manner as this just mirrors the logic in AuthContextMiddleware try: LOG.debug('RBAC: building auth context from the incoming auth token') token_ref = token_model.KeystoneToken( token_id=context['token_id'], token_data=self.token_provider_api.validate_token( context['token_id'])) # NOTE(jamielennox): whilst this maybe shouldn't be within this # function it would otherwise need to reload the token_ref from # backing store. wsgi.validate_token_bind(context, token_ref) except exception.TokenNotFound: LOG.warning(_LW('RBAC: Invalid token')) raise exception.Unauthorized() auth_context = authorization.token_to_auth_context(token_ref) return auth_context def protected(callback=None): """Wraps API calls with role based access controls (RBAC). This handles both the protection of the API parameters as well as any target entities for single-entity API calls. More complex API calls (for example that deal with several different entities) should pass in a callback function, that will be subsequently called to check protection for these multiple entities. This callback function should gather the appropriate entities needed and then call check_protection() in the V3Controller class. 
""" def wrapper(f): @functools.wraps(f) def inner(self, context, *args, **kwargs): if 'is_admin' in context and context['is_admin']: LOG.warning(_LW('RBAC: Bypassing authorization')) elif callback is not None: prep_info = {'f_name': f.__name__, 'input_attr': kwargs} callback(self, context, prep_info, *args, **kwargs) else: action = 'identity:%s' % f.__name__ creds = _build_policy_check_credentials(self, action, context, kwargs) policy_dict = {} # Check to see if we need to include the target entity in our # policy checks. We deduce this by seeing if the class has # specified a get_member() method and that kwargs contains the # appropriate entity id. if (hasattr(self, 'get_member_from_driver') and self.get_member_from_driver is not None): key = '%s_id' % self.member_name if key in kwargs: ref = self.get_member_from_driver(kwargs[key]) policy_dict['target'] = {self.member_name: ref} # TODO(henry-nash): Move this entire code to a member # method inside v3 Auth if context.get('subject_token_id') is not None: token_ref = token_model.KeystoneToken( token_id=context['subject_token_id'], token_data=self.token_provider_api.validate_token( context['subject_token_id'])) policy_dict.setdefault('target', {}) policy_dict['target'].setdefault(self.member_name, {}) policy_dict['target'][self.member_name]['user_id'] = ( token_ref.user_id) try: user_domain_id = token_ref.user_domain_id except exception.UnexpectedError: user_domain_id = None if user_domain_id: policy_dict['target'][self.member_name].setdefault( 'user', {}) policy_dict['target'][self.member_name][ 'user'].setdefault('domain', {}) policy_dict['target'][self.member_name]['user'][ 'domain']['id'] = ( user_domain_id) # Add in the kwargs, which means that any entity provided as a # parameter for calls like create and update will be included. 
policy_dict.update(kwargs) self.policy_api.enforce(creds, action, utils.flatten_dict(policy_dict)) LOG.debug('RBAC: Authorization granted') return f(self, context, *args, **kwargs) return inner return wrapper def filterprotected(*filters, **callback): """Wraps API list calls with role based access controls (RBAC). This handles both the protection of the API parameters as well as any filters supplied. More complex API list calls (for example that need to examine the contents of an entity referenced by one of the filters) should pass in a callback function, that will be subsequently called to check protection for these multiple entities. This callback function should gather the appropriate entities needed and then call check_protection() in the V3Controller class. """ def _filterprotected(f): @functools.wraps(f) def wrapper(self, context, **kwargs): if not context['is_admin']: # The target dict for the policy check will include: # # - Any query filter parameters # - Data from the main url (which will be in the kwargs # parameter), which although most of our APIs do not utilize, # in theory you could have. # # First build the dict of filter parameters target = dict() if filters: for item in filters: if item in context['query_string']: target[item] = context['query_string'][item] LOG.debug('RBAC: Adding query filter params (%s)', ( ', '.join(['%s=%s' % (item, target[item]) for item in target]))) if 'callback' in callback and callback['callback'] is not None: # A callback has been specified to load additional target # data, so pass it the formal url params as well as the # list of filters, so it can augment these and then call # the check_protection() method. 
prep_info = {'f_name': f.__name__, 'input_attr': kwargs, 'filter_attr': target} callback['callback'](self, context, prep_info, **kwargs) else: # No callback, so we are going to check the protection here action = 'identity:%s' % f.__name__ creds = _build_policy_check_credentials(self, action, context, kwargs) # Add in any formal url parameters for key in kwargs: target[key] = kwargs[key] self.policy_api.enforce(creds, action, utils.flatten_dict(target)) LOG.debug('RBAC: Authorization granted') else: LOG.warning(_LW('RBAC: Bypassing authorization')) return f(self, context, filters, **kwargs) return wrapper return _filterprotected class V2Controller(wsgi.Application): """Base controller class for Identity API v2.""" def _normalize_domain_id(self, context, ref): """Fill in domain_id since v2 calls are not domain-aware. This will overwrite any domain_id that was inadvertently specified in the v2 call. """ ref['domain_id'] = CONF.identity.default_domain_id return ref @staticmethod def filter_domain_id(ref): """Remove domain_id since v2 calls are not domain-aware.""" ref.pop('domain_id', None) return ref @staticmethod def filter_domain(ref): """Remove domain since v2 calls are not domain-aware.""" ref.pop('domain', None) return ref @staticmethod def filter_project_parent_id(ref): """Remove parent_id since v2 calls are not hierarchy-aware.""" ref.pop('parent_id', None) return ref @staticmethod def filter_is_domain(ref): """Remove is_domain field since v2 calls are not domain-aware.""" ref.pop('is_domain', None) return ref @staticmethod def normalize_username_in_response(ref): """Adds username to outgoing user refs to match the v2 spec. Internally we use `name` to represent a user's name. The v2 spec requires the use of `username` instead. """ if 'username' not in ref and 'name' in ref: ref['username'] = ref['name'] return ref @staticmethod def normalize_username_in_request(ref): """Adds name in incoming user refs to match the v2 spec. 
Internally we use `name` to represent a user's name. The v2 spec requires the use of `username` instead. """ if 'name' not in ref and 'username' in ref: ref['name'] = ref.pop('username') return ref @staticmethod def v3_to_v2_user(ref): """Convert a user_ref from v3 to v2 compatible. * v2.0 users are not domain aware, and should have domain_id removed * v2.0 users expect the use of tenantId instead of default_project_id * v2.0 users have a username attribute If ref is a list type, we will iterate through each element and do the conversion. """ def _format_default_project_id(ref): """Convert default_project_id to tenantId for v2 calls.""" default_project_id = ref.pop('default_project_id', None) if default_project_id is not None: ref['tenantId'] = default_project_id elif 'tenantId' in ref: # NOTE(morganfainberg): To avoid v2.0 confusion if somehow a # tenantId property sneaks its way into the extra blob on the # user, we remove it here. If default_project_id is set, we # would override it in either case. del ref['tenantId'] def _normalize_and_filter_user_properties(ref): """Run through the various filter/normalization methods.""" _format_default_project_id(ref) V2Controller.filter_domain(ref) V2Controller.filter_domain_id(ref) V2Controller.normalize_username_in_response(ref) return ref if isinstance(ref, dict): return _normalize_and_filter_user_properties(ref) elif isinstance(ref, list): return [_normalize_and_filter_user_properties(x) for x in ref] else: raise ValueError(_('Expected dict or list: %s') % type(ref)) @staticmethod def v3_to_v2_project(ref): """Convert a project_ref from v3 to v2. * v2.0 projects are not domain aware, and should have domain_id removed * v2.0 projects are not hierarchy aware, and should have parent_id removed This method should only be applied to project_refs being returned from the v2.0 controller(s). If ref is a list type, we will iterate through each element and do the conversion. 
""" def _filter_project_properties(ref): """Run through the various filter methods.""" V2Controller.filter_domain_id(ref) V2Controller.filter_project_parent_id(ref) V2Controller.filter_is_domain(ref) return ref if isinstance(ref, dict): return _filter_project_properties(ref) elif isinstance(ref, list): return [_filter_project_properties(x) for x in ref] else: raise ValueError(_('Expected dict or list: %s') % type(ref)) def format_project_list(self, tenant_refs, **kwargs): """Format a v2 style project list, including marker/limits.""" marker = kwargs.get('marker') first_index = 0 if marker is not None: for (marker_index, tenant) in enumerate(tenant_refs): if tenant['id'] == marker: # we start pagination after the marker first_index = marker_index + 1 break else: msg = _('Marker could not be found') raise exception.ValidationError(message=msg) limit = kwargs.get('limit') last_index = None if limit is not None: try: limit = int(limit) if limit < 0: raise AssertionError() except (ValueError, AssertionError): msg = _('Invalid limit value') raise exception.ValidationError(message=msg) last_index = first_index + limit tenant_refs = tenant_refs[first_index:last_index] for x in tenant_refs: if 'enabled' not in x: x['enabled'] = True o = {'tenants': tenant_refs, 'tenants_links': []} return o @dependency.requires('policy_api', 'token_provider_api') class V3Controller(wsgi.Application): """Base controller class for Identity API v3. Child classes should set the ``collection_name`` and ``member_name`` class attributes, representing the collection of entities they are exposing to the API. This is required for supporting self-referential links, pagination, etc. Class parameters: * `_public_parameters` - set of parameters that are exposed to the user. 
Usually used by cls.filter_params() """ collection_name = 'entities' member_name = 'entity' get_member_from_driver = None @classmethod def base_url(cls, context, path=None): endpoint = super(V3Controller, cls).base_url(context, 'public') if not path: path = cls.collection_name return '%s/%s/%s' % (endpoint, 'v3', path.lstrip('/')) def get_auth_context(self, context): # TODO(dolphm): this method of accessing the auth context is terrible, # but context needs to be refactored to always have reasonable values. env_context = context.get('environment', {}) return env_context.get(authorization.AUTH_CONTEXT_ENV, {}) @classmethod def full_url(cls, context, path=None): url = cls.base_url(context, path) if context['environment'].get('QUERY_STRING'): url = '%s?%s' % (url, context['environment']['QUERY_STRING']) return url @classmethod def query_filter_is_true(cls, filter_value): """Determine if bool query param is 'True'. We treat this the same way as we do for policy enforcement: {bool_param}=0 is treated as False Any other value is considered to be equivalent to True, including the absence of a value """ if (isinstance(filter_value, six.string_types) and filter_value == '0'): val = False else: val = True return val @classmethod def _add_self_referential_link(cls, context, ref): ref.setdefault('links', {}) ref['links']['self'] = cls.base_url(context) + '/' + ref['id'] @classmethod def wrap_member(cls, context, ref): cls._add_self_referential_link(context, ref) return {cls.member_name: ref} @classmethod def wrap_collection(cls, context, refs, hints=None): """Wrap a collection, checking for filtering and pagination. Returns the wrapped collection, which includes: - Executing any filtering not already carried out - Truncate to a set limit if necessary - Adds 'self' links in every member - Adds 'next', 'self' and 'prev' links for the whole collection. 
:param context: the current context, containing the original url path and query string :param refs: the list of members of the collection :param hints: list hints, containing any relevant filters and limit. Any filters already satisfied by managers will have been removed """ # Check if there are any filters in hints that were not # handled by the drivers. The driver will not have paginated or # limited the output if it found there were filters it was unable to # handle. if hints is not None: refs = cls.filter_by_attributes(refs, hints) list_limited, refs = cls.limit(refs, hints) for ref in refs: cls.wrap_member(context, ref) container = {cls.collection_name: refs} container['links'] = { 'next': None, 'self': cls.full_url(context, path=context['path']), 'previous': None} if list_limited: container['truncated'] = True return container @classmethod def limit(cls, refs, hints): """Limits a list of entities. The underlying driver layer may have already truncated the collection for us, but in case it was unable to handle truncation we check here. :param refs: the list of members of the collection :param hints: hints, containing, among other things, the limit requested :returns: boolean indicating whether the list was truncated, as well as the list of (truncated if necessary) entities. """ NOT_LIMITED = False LIMITED = True if hints is None or hints.limit is None: # No truncation was requested return NOT_LIMITED, refs if hints.limit.get('truncated', False): # The driver did truncate the list return LIMITED, refs if len(refs) > hints.limit['limit']: # The driver layer wasn't able to truncate it for us, so we must # do it here return LIMITED, refs[:hints.limit['limit']] return NOT_LIMITED, refs @classmethod def filter_by_attributes(cls, refs, hints): """Filters a list of references by filter values.""" def _attr_match(ref_attr, val_attr): """Matches attributes allowing for booleans as strings. 
We test explicitly for a value that defines it as 'False', which also means that the existence of the attribute with no value implies 'True' """ if type(ref_attr) is bool: return ref_attr == utils.attr_as_boolean(val_attr) else: return ref_attr == val_attr def _inexact_attr_match(filter, ref): """Applies an inexact filter to a result dict. :param filter: the filter in question :param ref: the dict to check :returns: True if there is a match """ comparator = filter['comparator'] key = filter['name'] if key in ref: filter_value = filter['value'] target_value = ref[key] if not filter['case_sensitive']: # We only support inexact filters on strings so # it's OK to use lower() filter_value = filter_value.lower() target_value = target_value.lower() if comparator == 'contains': return (filter_value in target_value) elif comparator == 'startswith': return target_value.startswith(filter_value) elif comparator == 'endswith': return target_value.endswith(filter_value) else: # We silently ignore unsupported filters return True return False for filter in hints.filters: if filter['comparator'] == 'equals': attr = filter['name'] value = filter['value'] refs = [r for r in refs if _attr_match( utils.flatten_dict(r).get(attr), value)] else: # It might be an inexact filter refs = [r for r in refs if _inexact_attr_match( filter, r)] return refs @classmethod def build_driver_hints(cls, context, supported_filters): """Build list hints based on the context query string. :param context: contains the query_string from which any list hints can be extracted :param supported_filters: list of filters supported, so ignore any keys in query_dict that are not in this list. 
""" query_dict = context['query_string'] hints = driver_hints.Hints() if query_dict is None: return hints for key in query_dict: # Check if this is an exact filter if supported_filters is None or key in supported_filters: hints.add_filter(key, query_dict[key]) continue # Check if it is an inexact filter for valid_key in supported_filters: # See if this entry in query_dict matches a known key with an # inexact suffix added. If it doesn't match, then that just # means that there is no inexact filter for that key in this # query. if not key.startswith(valid_key + '__'): continue base_key, comparator = key.split('__', 1) # We map the query-style inexact of, for example: # # {'email__contains', 'myISP'} # # into a list directive add filter call parameters of: # # name = 'email' # value = 'myISP' # comparator = 'contains' # case_sensitive = True case_sensitive = True if comparator.startswith('i'): case_sensitive = False comparator = comparator[1:] hints.add_filter(base_key, query_dict[key], comparator=comparator, case_sensitive=case_sensitive) # NOTE(henry-nash): If we were to support pagination, we would pull any # pagination directives out of the query_dict here, and add them into # the hints list. return hints def _require_matching_id(self, value, ref): """Ensures the value matches the reference's ID, if any.""" if 'id' in ref and ref['id'] != value: raise exception.ValidationError('Cannot change ID') def _require_matching_domain_id(self, ref_id, ref, get_member): """Ensure the current domain ID matches the reference one, if any. Provided we want domain IDs to be immutable, check whether any domain_id specified in the ref dictionary matches the existing domain_id for this entity. 
:param ref_id: the ID of the entity :param ref: the dictionary of new values proposed for this entity :param get_member: The member function to call to get the current entity :raises: :class:`keystone.exception.ValidationError` """ # TODO(henry-nash): It might be safer and more efficient to do this # check in the managers affected, so look to migrate this check to # there in the future. if CONF.domain_id_immutable and 'domain_id' in ref: existing_ref = get_member(ref_id) if ref['domain_id'] != existing_ref['domain_id']: raise exception.ValidationError(_('Cannot change Domain ID')) def _assign_unique_id(self, ref): """Generates and assigns a unique identifier to a reference.""" ref = ref.copy() ref['id'] = uuid.uuid4().hex return ref def _get_domain_id_for_list_request(self, context): """Get the domain_id for a v3 list call. If we running with multiple domain drivers, then the caller must specify a domain_id either as a filter or as part of the token scope. """ if not CONF.identity.domain_specific_drivers_enabled: # We don't need to specify a domain ID in this case return if context['query_string'].get('domain_id') is not None: return context['query_string'].get('domain_id') token_ref = utils.get_token_ref(context) if token_ref.domain_scoped: return token_ref.domain_id elif token_ref.project_scoped: return token_ref.project_domain_id else: LOG.warning( _LW('No domain information specified as part of list request')) raise exception.Unauthorized() def _get_domain_id_from_token(self, context): """Get the domain_id for a v3 create call. In the case of a v3 create entity call that does not specify a domain ID, the spec says that we should use the domain scoping from the token being used. """ try: token_ref = utils.get_token_ref(context) except exception.Unauthorized: if context.get('is_admin'): raise exception.ValidationError( _('You have tried to create a resource using the admin ' 'token. 
As this token is not within a domain you must ' 'explicitly include a domain for this resource to ' 'belong to.')) raise if token_ref.domain_scoped: return token_ref.domain_id else: # TODO(henry-nash): We should issue an exception here since if # a v3 call does not explicitly specify the domain_id in the # entity, it should be using a domain scoped token. However, # the current tempest heat tests issue a v3 call without this. # This is raised as bug #1283539. Once this is fixed, we # should remove the line below and replace it with an error. # # Ahead of actually changing the code to raise an exception, we # issue a deprecation warning. versionutils.report_deprecated_feature( LOG, _LW('Not specifying a domain during a create user, group or ' 'project call, and relying on falling back to the ' 'default domain, is deprecated as of Liberty and will be ' 'removed in the N release. Specify the domain explicitly ' 'or use a domain-scoped token')) return CONF.identity.default_domain_id def _normalize_domain_id(self, context, ref): """Fill in domain_id if not specified in a v3 call.""" if not ref.get('domain_id'): ref['domain_id'] = self._get_domain_id_from_token(context) return ref @staticmethod def filter_domain_id(ref): """Override v2 filter to let domain_id out for v3 calls.""" return ref def check_protection(self, context, prep_info, target_attr=None): """Provide call protection for complex target attributes. As well as including the standard parameters from the original API call (which is passed in prep_info), this call will add in any additional entities or attributes (passed in target_attr), so that they can be referenced by policy rules. 
""" if 'is_admin' in context and context['is_admin']: LOG.warning(_LW('RBAC: Bypassing authorization')) else: action = 'identity:%s' % prep_info['f_name'] # TODO(henry-nash) need to log the target attributes as well creds = _build_policy_check_credentials(self, action, context, prep_info['input_attr']) # Build the dict the policy engine will check against from both the # parameters passed into the call we are protecting (which was # stored in the prep_info by protected()), plus the target # attributes provided. policy_dict = {} if target_attr: policy_dict = {'target': target_attr} policy_dict.update(prep_info['input_attr']) if 'filter_attr' in prep_info: policy_dict.update(prep_info['filter_attr']) self.policy_api.enforce(creds, action, utils.flatten_dict(policy_dict)) LOG.debug('RBAC: Authorization granted') @classmethod def filter_params(cls, ref): """Remove unspecified parameters from the dictionary. This function removes unspecified parameters from the dictionary. This method checks only root-level keys from a ref dictionary. :param ref: a dictionary representing deserialized response to be serialized """ ref_keys = set(ref.keys()) blocked_keys = ref_keys - cls._public_parameters for blocked_param in blocked_keys: del ref[blocked_param] return ref keystone-9.0.0/keystone/common/router.py0000664000567000056710000000630012701407105021541 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import json_home from keystone.common import wsgi class Router(wsgi.ComposableRouter): def __init__(self, controller, collection_key, key, resource_descriptions=None, is_entity_implemented=True, method_template=None): self.controller = controller self.key = key self.collection_key = collection_key self._resource_descriptions = resource_descriptions self._is_entity_implemented = is_entity_implemented self.method_template = method_template or '%s' def add_routes(self, mapper): collection_path = '/%(collection_key)s' % { 'collection_key': self.collection_key} entity_path = '/%(collection_key)s/{%(key)s_id}' % { 'collection_key': self.collection_key, 'key': self.key} mapper.connect( collection_path, controller=self.controller, action=self.method_template % 'create_%s' % self.key, conditions=dict(method=['POST'])) mapper.connect( collection_path, controller=self.controller, action=self.method_template % 'list_%s' % self.collection_key, conditions=dict(method=['GET'])) mapper.connect( entity_path, controller=self.controller, action=self.method_template % 'get_%s' % self.key, conditions=dict(method=['GET'])) mapper.connect( entity_path, controller=self.controller, action=self.method_template % 'update_%s' % self.key, conditions=dict(method=['PATCH'])) mapper.connect( entity_path, controller=self.controller, action=self.method_template % 'delete_%s' % self.key, conditions=dict(method=['DELETE'])) # Add the collection resource and entity resource to the resource # descriptions. 
collection_rel = json_home.build_v3_resource_relation( self.collection_key) rel_data = {'href': collection_path, } self._resource_descriptions.append((collection_rel, rel_data)) if self._is_entity_implemented: entity_rel = json_home.build_v3_resource_relation(self.key) id_str = '%s_id' % self.key id_param_rel = json_home.build_v3_parameter_relation(id_str) entity_rel_data = { 'href-template': entity_path, 'href-vars': { id_str: id_param_rel, }, } self._resource_descriptions.append((entity_rel, entity_rel_data)) keystone-9.0.0/keystone/common/config.py0000664000567000056710000020737512701407102021502 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os from oslo_cache import core as cache from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_middleware import cors import passlib.utils from keystone import exception _DEFAULT_AUTH_METHODS = ['external', 'password', 'token', 'oauth1'] _CERTFILE = '/etc/keystone/ssl/certs/signing_cert.pem' _KEYFILE = '/etc/keystone/ssl/private/signing_key.pem' _SSO_CALLBACK = '/etc/keystone/sso_callback_template.html' _DEPRECATE_PKI_MSG = ('PKI token support has been deprecated in the M ' 'release and will be removed in the O release. Fernet ' 'or UUID tokens are recommended.') _DEPRECATE_INHERIT_MSG = ('The option to enable the OS-INHERIT extension has ' 'been deprecated in the M release and will be ' 'removed in the O release. 
The OS-INHERIT extension ' 'will be enabled by default.') _DEPRECATE_EP_MSG = ('The option to enable the OS-ENDPOINT-POLICY extension ' 'has been deprecated in the M release and will be ' 'removed in the O release. The OS-ENDPOINT-POLICY ' 'extension will be enabled by default.') FILE_OPTIONS = { None: [ cfg.StrOpt('admin_token', secret=True, default=None, help='A "shared secret" that can be used to bootstrap ' 'Keystone. This "token" does not represent a user, ' 'and carries no explicit authorization. If set ' 'to `None`, the value is ignored and the ' '`admin_token` log in mechanism is effectively ' 'disabled. To completely disable `admin_token` ' 'in production (highly recommended), remove ' 'AdminTokenAuthMiddleware from your paste ' 'application pipelines (for example, in ' 'keystone-paste.ini).'), cfg.StrOpt('public_endpoint', help='The base public endpoint URL for Keystone that is ' 'advertised to clients (NOTE: this does NOT affect ' 'how Keystone listens for connections). ' 'Defaults to the base host URL of the request. E.g. a ' 'request to http://server:5000/v3/users will ' 'default to http://server:5000. You should only need ' 'to set this value if the base URL contains a path ' '(e.g. /prefix/v3) or the endpoint should be found ' 'on a different server.'), cfg.StrOpt('admin_endpoint', help='The base admin endpoint URL for Keystone that is ' 'advertised to clients (NOTE: this does NOT affect ' 'how Keystone listens for connections). ' 'Defaults to the base host URL of the request. E.g. a ' 'request to http://server:35357/v3/users will ' 'default to http://server:35357. You should only need ' 'to set this value if the base URL contains a path ' '(e.g. /prefix/v3) or the endpoint should be found ' 'on a different server.'), cfg.IntOpt('max_project_tree_depth', default=5, help='Maximum depth of the project hierarchy, excluding ' 'the project acting as a domain at the top of the ' 'hierarchy. 
WARNING: setting it to a large value may ' 'adversely impact performance.'), cfg.IntOpt('max_param_size', default=64, help='Limit the sizes of user & project ID/names.'), # we allow tokens to be a bit larger to accommodate PKI cfg.IntOpt('max_token_size', default=8192, help='Similar to max_param_size, but provides an ' 'exception for token values.'), cfg.StrOpt('member_role_id', default='9fe2ff9ee4384b1894a90878d3e92bab', help='Similar to the member_role_name option, this ' 'represents the default role ID used to associate ' 'users with their default projects in the v2 API. ' 'This will be used as the explicit role where one is ' 'not specified by the v2 API.'), cfg.StrOpt('member_role_name', default='_member_', help='This is the role name used in combination with the ' 'member_role_id option; see that option for more ' 'detail.'), # NOTE(lbragstad/morganfainberg): This value of 10k was # measured as having an approximate 30% clock-time savings # over the old default of 40k. The passlib default is not # static and grows over time to constantly approximate ~300ms # of CPU time to hash; this was considered too high. This # value still exceeds the glibc default of 5k. cfg.IntOpt('crypt_strength', default=10000, min=1000, max=100000, help='The value passed as the keyword "rounds" to ' 'passlib\'s encrypt method.'), cfg.IntOpt('list_limit', help='The maximum number of entities that will be ' 'returned in a collection, with no limit set by ' 'default. This global limit may be then overridden ' 'for a specific driver, by specifying a list_limit ' 'in the appropriate section (e.g. [assignment]).'), cfg.BoolOpt('domain_id_immutable', default=True, help='Set this to false if you want to enable the ' 'ability for user, group and project entities ' 'to be moved between domains by updating their ' 'domain_id. 
Allowing such movement is not ' 'recommended if the scope of a domain admin is being ' 'restricted by use of an appropriate policy file ' '(see policy.v3cloudsample as an example). This ' 'ability is deprecated and will be removed in a ' 'future release.', deprecated_for_removal=True), cfg.BoolOpt('strict_password_check', default=False, help='If set to true, strict password length checking is ' 'performed for password manipulation. If a password ' 'exceeds the maximum length, the operation will fail ' 'with an HTTP 403 Forbidden error. If set to false, ' 'passwords are automatically truncated to the ' 'maximum length.'), cfg.StrOpt('secure_proxy_ssl_header', default='HTTP_X_FORWARDED_PROTO', help='The HTTP header used to determine the scheme for the ' 'original request, even if it was removed by an SSL ' 'terminating proxy.'), cfg.BoolOpt('insecure_debug', default=False, help='If set to true the server will return information ' 'in the response that may allow an unauthenticated ' 'or authenticated user to get more information than ' 'normal, such as why authentication failed. This may ' 'be useful for debugging but is insecure.'), ], 'identity': [ cfg.StrOpt('default_domain_id', default='default', help='This references the domain to use for all ' 'Identity API v2 requests (which are not aware of ' 'domains). A domain with this ID will be created ' 'for you by keystone-manage db_sync in migration ' '008. The domain referenced by this ID cannot be ' 'deleted on the v3 API, to prevent accidentally ' 'breaking the v2 API. 
There is nothing special about ' 'this domain, other than the fact that it must ' 'exist to order to maintain support for your v2 ' 'clients.'), cfg.BoolOpt('domain_specific_drivers_enabled', default=False, help='A subset (or all) of domains can have their own ' 'identity driver, each with their own partial ' 'configuration options, stored in either the ' 'resource backend or in a file in a domain ' 'configuration directory (depending on the setting ' 'of domain_configurations_from_database). Only ' 'values specific to the domain need to be specified ' 'in this manner. This feature is disabled by ' 'default; set to true to enable.'), cfg.BoolOpt('domain_configurations_from_database', default=False, help='Extract the domain specific configuration options ' 'from the resource backend where they have been ' 'stored with the domain data. This feature is ' 'disabled by default (in which case the domain ' 'specific options will be loaded from files in the ' 'domain configuration directory); set to true to ' 'enable.'), cfg.StrOpt('domain_config_dir', default='/etc/keystone/domains', help='Path for Keystone to locate the domain specific ' 'identity configuration files if ' 'domain_specific_drivers_enabled is set to true.'), cfg.StrOpt('driver', default='sql', help='Entrypoint for the identity backend driver in the ' 'keystone.identity namespace. Supplied drivers are ' 'ldap and sql.'), cfg.BoolOpt('caching', default=True, help='Toggle for identity caching. This has no ' 'effect unless global caching is enabled.'), cfg.IntOpt('cache_time', default=600, help='Time to cache identity data (in seconds). 
This has ' 'no effect unless global and identity caching are ' 'enabled.'), cfg.IntOpt('max_password_length', default=4096, max=passlib.utils.MAX_PASSWORD_SIZE, help='Maximum supported length for user passwords; ' 'decrease to improve performance.'), cfg.IntOpt('list_limit', help='Maximum number of entities that will be returned in ' 'an identity collection.'), ], 'identity_mapping': [ cfg.StrOpt('driver', default='sql', help='Entrypoint for the identity mapping backend driver ' 'in the keystone.identity.id_mapping namespace.'), cfg.StrOpt('generator', default='sha256', help='Entrypoint for the public ID generator for user and ' 'group entities in the keystone.identity.id_generator ' 'namespace. The Keystone identity mapper only ' 'supports generators that produce no more than 64 ' 'characters.'), cfg.BoolOpt('backward_compatible_ids', default=True, help='The format of user and group IDs changed ' 'in Juno for backends that do not generate UUIDs ' '(e.g. LDAP), with keystone providing a hash mapping ' 'to the underlying attribute in LDAP. By default ' 'this mapping is disabled, which ensures that ' 'existing IDs will not change. Even when the ' 'mapping is enabled by using domain specific ' 'drivers, any users and groups from the default ' 'domain being handled by LDAP will still not be ' 'mapped to ensure their IDs remain backward ' 'compatible. Setting this value to False will ' 'enable the mapping for even the default LDAP ' 'driver. It is only safe to do this if you do not ' 'already have assignments for users and ' 'groups from the default LDAP domain, and it is ' 'acceptable for Keystone to provide the different ' 'IDs to clients than it did previously. 
Typically ' 'this means that the only time you can set this ' 'value to False is when configuring a fresh ' 'installation.'), ], 'shadow_users': [ cfg.StrOpt('driver', default='sql', help='Entrypoint for the shadow users backend driver ' 'in the keystone.identity.shadow_users namespace.'), ], 'trust': [ cfg.BoolOpt('enabled', default=True, help='Delegation and impersonation features can be ' 'optionally disabled.'), cfg.BoolOpt('allow_redelegation', default=False, help='Enable redelegation feature.'), cfg.IntOpt('max_redelegation_count', default=3, help='Maximum depth of trust redelegation.'), cfg.StrOpt('driver', default='sql', help='Entrypoint for the trust backend driver in the ' 'keystone.trust namespace.')], 'os_inherit': [ cfg.BoolOpt('enabled', default=True, deprecated_for_removal=True, deprecated_reason=_DEPRECATE_INHERIT_MSG, help='role-assignment inheritance to projects from ' 'owning domain or from projects higher in the ' 'hierarchy can be optionally disabled. In the ' 'future, this option will be removed and the ' 'hierarchy will be always enabled.'), ], 'fernet_tokens': [ cfg.StrOpt('key_repository', default='/etc/keystone/fernet-keys/', help='Directory containing Fernet token keys.'), cfg.IntOpt('max_active_keys', default=3, help='This controls how many keys are held in rotation by ' 'keystone-manage fernet_rotate before they are ' 'discarded. The default value of 3 means that ' 'keystone will maintain one staged key, one primary ' 'key, and one secondary key. Increasing this value ' 'means that additional secondary keys will be kept in ' 'the rotation.'), ], 'token': [ cfg.ListOpt('bind', default=[], help='External auth mechanisms that should add bind ' 'information to token, e.g., kerberos,x509.'), cfg.StrOpt('enforce_token_bind', default='permissive', help='Enforcement policy on tokens presented to Keystone ' 'with bind information. 
One of disabled, permissive, ' 'strict, required or a specifically required bind ' 'mode, e.g., kerberos or x509 to require binding to ' 'that authentication.'), cfg.IntOpt('expiration', default=3600, help='Amount of time a token should remain valid ' '(in seconds).'), cfg.StrOpt('provider', default='uuid', help='Controls the token construction, validation, and ' 'revocation operations. Entrypoint in the ' 'keystone.token.provider namespace. Core providers ' 'are [fernet|pkiz|pki|uuid].'), cfg.StrOpt('driver', default='sql', help='Entrypoint for the token persistence backend driver ' 'in the keystone.token.persistence namespace. ' 'Supplied drivers are kvs, memcache, memcache_pool, ' 'and sql.'), cfg.BoolOpt('caching', default=True, help='Toggle for token system caching. This has no ' 'effect unless global caching is enabled.'), cfg.IntOpt('cache_time', help='Time to cache tokens (in seconds). This has no ' 'effect unless global and token caching are ' 'enabled.'), cfg.BoolOpt('revoke_by_id', default=True, help='Revoke token by token identifier. Setting ' 'revoke_by_id to true enables various forms of ' 'enumerating tokens, e.g. `list tokens for user`. ' 'These enumerations are processed to determine the ' 'list of tokens to revoke. Only disable if you are ' 'switching to using the Revoke extension with a ' 'backend other than KVS, which stores events in memory.'), cfg.BoolOpt('allow_rescope_scoped_token', default=True, help='Allow rescoping of scoped token. Setting ' 'allow_rescoped_scoped_token to false prevents a user ' 'from exchanging a scoped token for any other token.'), cfg.StrOpt('hash_algorithm', default='md5', deprecated_for_removal=True, deprecated_reason=_DEPRECATE_PKI_MSG, help='The hash algorithm to use for PKI tokens. This can ' 'be set to any algorithm that hashlib supports. 
' 'WARNING: Before changing this value, the auth_token ' 'middleware must be configured with the ' 'hash_algorithms, otherwise token revocation will ' 'not be processed correctly.'), cfg.BoolOpt('infer_roles', default=True, help='Add roles to token that are not explicitly added, ' 'but that are linked implicitly to other roles.'), ], 'revoke': [ cfg.StrOpt('driver', default='sql', help='Entrypoint for an implementation of the backend for ' 'persisting revocation events in the keystone.revoke ' 'namespace. Supplied drivers are kvs and sql.'), cfg.IntOpt('expiration_buffer', default=1800, help='This value (calculated in seconds) is added to token ' 'expiration before a revocation event may be removed ' 'from the backend.'), cfg.BoolOpt('caching', default=True, help='Toggle for revocation event caching. This has no ' 'effect unless global caching is enabled.'), cfg.IntOpt('cache_time', default=3600, help='Time to cache the revocation list and the revocation ' 'events (in seconds). This has no effect unless ' 'global and token caching are enabled.', deprecated_opts=[cfg.DeprecatedOpt( 'revocation_cache_time', group='token')]), ], 'ssl': [ cfg.StrOpt('ca_key', default='/etc/keystone/ssl/private/cakey.pem', help='Path of the CA key file for SSL.'), cfg.IntOpt('key_size', default=1024, min=1024, help='SSL key length (in bits) (auto generated ' 'certificate).'), cfg.IntOpt('valid_days', default=3650, help='Days the certificate is valid for once signed ' '(auto generated certificate).'), cfg.StrOpt('cert_subject', default='/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost', help='SSL certificate subject (auto generated ' 'certificate).'), ], 'signing': [ cfg.StrOpt('certfile', default=_CERTFILE, deprecated_for_removal=True, deprecated_reason=_DEPRECATE_PKI_MSG, help='Path of the certfile for token signing. 
For ' 'non-production environments, you may be interested ' 'in using `keystone-manage pki_setup` to generate ' 'self-signed certificates.'), cfg.StrOpt('keyfile', default=_KEYFILE, deprecated_for_removal=True, deprecated_reason=_DEPRECATE_PKI_MSG, help='Path of the keyfile for token signing.'), cfg.StrOpt('ca_certs', deprecated_for_removal=True, deprecated_reason=_DEPRECATE_PKI_MSG, default='/etc/keystone/ssl/certs/ca.pem', help='Path of the CA for token signing.'), cfg.StrOpt('ca_key', default='/etc/keystone/ssl/private/cakey.pem', deprecated_for_removal=True, deprecated_reason=_DEPRECATE_PKI_MSG, help='Path of the CA key for token signing.'), cfg.IntOpt('key_size', default=2048, min=1024, deprecated_for_removal=True, deprecated_reason=_DEPRECATE_PKI_MSG, help='Key size (in bits) for token signing cert ' '(auto generated certificate).'), cfg.IntOpt('valid_days', default=3650, deprecated_for_removal=True, deprecated_reason=_DEPRECATE_PKI_MSG, help='Days the token signing cert is valid for ' '(auto generated certificate).'), cfg.StrOpt('cert_subject', deprecated_for_removal=True, deprecated_reason=_DEPRECATE_PKI_MSG, default=('/C=US/ST=Unset/L=Unset/O=Unset/' 'CN=www.example.com'), help='Certificate subject (auto generated certificate) for ' 'token signing.'), ], 'assignment': [ cfg.StrOpt('driver', help='Entrypoint for the assignment backend driver in the ' 'keystone.assignment namespace. Only an SQL driver is ' 'supplied. If an assignment driver is not ' 'specified, the identity driver will choose the ' 'assignment driver (driver selection based on ' '`[identity]/driver` option is deprecated and will be ' 'removed in the "O" release).'), cfg.ListOpt('prohibited_implied_role', default=['admin'], help='A list of role names which are prohibited from ' 'being an implied role.'), ], 'resource': [ cfg.StrOpt('driver', help='Entrypoint for the resource backend driver in the ' 'keystone.resource namespace. Only an SQL driver is ' 'supplied. 
If a resource driver is not specified, ' 'the assignment driver will choose the resource ' 'driver.'), cfg.BoolOpt('caching', default=True, deprecated_opts=[cfg.DeprecatedOpt('caching', group='assignment')], help='Toggle for resource caching. This has no effect ' 'unless global caching is enabled.'), cfg.IntOpt('cache_time', deprecated_opts=[cfg.DeprecatedOpt('cache_time', group='assignment')], help='TTL (in seconds) to cache resource data. This has ' 'no effect unless global caching is enabled.'), cfg.IntOpt('list_limit', deprecated_opts=[cfg.DeprecatedOpt('list_limit', group='assignment')], help='Maximum number of entities that will be returned ' 'in a resource collection.'), cfg.StrOpt('admin_project_domain_name', help='Name of the domain that owns the ' '`admin_project_name`. Defaults to None.'), cfg.StrOpt('admin_project_name', help='Special project for performing administrative ' 'operations on remote services. Tokens scoped to ' 'this project will contain the key/value ' '`is_admin_project=true`. Defaults to None.'), cfg.StrOpt('project_name_url_safe', choices=['off', 'new', 'strict'], default='off', help='Whether the names of projects are restricted from ' 'containing url reserved characters. If set to new, ' 'attempts to create or update a project with a url ' 'unsafe name will return an error. In addition, if ' 'set to strict, attempts to scope a token using ' 'an unsafe project name will return an error.'), cfg.StrOpt('domain_name_url_safe', choices=['off', 'new', 'strict'], default='off', help='Whether the names of domains are restricted from ' 'containing url reserved characters. If set to new, ' 'attempts to create or update a domain with a url ' 'unsafe name will return an error. 
In addition, if ' 'set to strict, attempts to scope a token using a ' 'domain name which is unsafe will return an error.'), ], 'domain_config': [ cfg.StrOpt('driver', default='sql', help='Entrypoint for the domain config backend driver in ' 'the keystone.resource.domain_config namespace.'), cfg.BoolOpt('caching', default=True, help='Toggle for domain config caching. This has no ' 'effect unless global caching is enabled.'), cfg.IntOpt('cache_time', default=300, help='TTL (in seconds) to cache domain config data. This ' 'has no effect unless domain config caching is ' 'enabled.'), ], 'role': [ # The role driver has no default for backward compatibility reasons. # If role driver is not specified, the assignment driver chooses # the backend cfg.StrOpt('driver', help='Entrypoint for the role backend driver in the ' 'keystone.role namespace. Supplied drivers are ldap ' 'and sql.'), cfg.BoolOpt('caching', default=True, help='Toggle for role caching. This has no effect ' 'unless global caching is enabled.'), cfg.IntOpt('cache_time', help='TTL (in seconds) to cache role data. 
This has ' 'no effect unless global caching is enabled.'), cfg.IntOpt('list_limit', help='Maximum number of entities that will be returned ' 'in a role collection.'), ], 'credential': [ cfg.StrOpt('driver', default='sql', help='Entrypoint for the credential backend driver in the ' 'keystone.credential namespace.'), ], 'oauth1': [ cfg.StrOpt('driver', default='sql', help='Entrypoint for the OAuth backend driver in the ' 'keystone.oauth1 namespace.'), cfg.IntOpt('request_token_duration', default=28800, help='Duration (in seconds) for the OAuth Request Token.'), cfg.IntOpt('access_token_duration', default=86400, help='Duration (in seconds) for the OAuth Access Token.'), ], 'federation': [ cfg.StrOpt('driver', default='sql', help='Entrypoint for the federation backend driver in the ' 'keystone.federation namespace.'), cfg.StrOpt('assertion_prefix', default='', help='Value to be used when filtering assertion parameters ' 'from the environment.'), cfg.StrOpt('remote_id_attribute', help='Value to be used to obtain the entity ID of the ' 'Identity Provider from the environment (e.g. if ' 'using the mod_shib plugin this value is ' '`Shib-Identity-Provider`).'), cfg.StrOpt('federated_domain_name', default='Federated', help='A domain name that is reserved to allow federated ' 'ephemeral users to have a domain concept. Note that ' 'an admin will not be able to create a domain with ' 'this name or update an existing domain to this ' 'name. You are not advised to change this value ' 'unless you really have to.'), cfg.MultiStrOpt('trusted_dashboard', default=[], help='A list of trusted dashboard hosts. Before ' 'accepting a Single Sign-On request to return a ' 'token, the origin host must be a member of the ' 'trusted_dashboard list. This configuration ' 'option may be repeated for multiple values. 
' 'For example: ' 'trusted_dashboard=http://acme.com/auth/websso ' 'trusted_dashboard=http://beta.com/auth/websso'), cfg.StrOpt('sso_callback_template', default=_SSO_CALLBACK, help='Location of Single Sign-On callback handler, will ' 'return a token to a trusted dashboard host.'), ], 'policy': [ cfg.StrOpt('driver', default='sql', help='Entrypoint for the policy backend driver in the ' 'keystone.policy namespace. Supplied drivers are ' 'rules and sql.'), cfg.IntOpt('list_limit', help='Maximum number of entities that will be returned ' 'in a policy collection.'), ], 'endpoint_filter': [ cfg.StrOpt('driver', default='sql', help='Entrypoint for the endpoint filter backend driver in ' 'the keystone.endpoint_filter namespace.'), cfg.BoolOpt('return_all_endpoints_if_no_filter', default=True, help='Toggle to return all active endpoints if no filter ' 'exists.'), ], 'endpoint_policy': [ cfg.BoolOpt('enabled', default=True, deprecated_for_removal=True, deprecated_reason=_DEPRECATE_EP_MSG, help='Enable endpoint_policy functionality.'), cfg.StrOpt('driver', default='sql', help='Entrypoint for the endpoint policy backend driver in ' 'the keystone.endpoint_policy namespace.'), ], 'ldap': [ cfg.StrOpt('url', default='ldap://localhost', help='URL(s) for connecting to the LDAP server. Multiple ' 'LDAP URLs may be specified as a comma separated ' 'string. The first URL to successfully bind is used ' 'for the connection.'), cfg.StrOpt('user', help='User BindDN to query the LDAP server.'), cfg.StrOpt('password', secret=True, help='Password for the BindDN to query the LDAP server.'), cfg.StrOpt('suffix', default='cn=example,cn=com', help='LDAP server suffix'), cfg.BoolOpt('use_dumb_member', default=False, help='If true, will add a dummy member to groups. 
This is ' 'required if the objectclass for groups requires the ' '"member" attribute.'), cfg.StrOpt('dumb_member', default='cn=dumb,dc=nonexistent', help='DN of the "dummy member" to use when ' '"use_dumb_member" is enabled.'), cfg.BoolOpt('allow_subtree_delete', default=False, help='Delete subtrees using the subtree delete control. ' 'Only enable this option if your LDAP server ' 'supports subtree deletion.'), cfg.StrOpt('query_scope', default='one', choices=['one', 'sub'], help='The LDAP scope for queries, "one" represents ' 'oneLevel/singleLevel and "sub" represents ' 'subtree/wholeSubtree options.'), cfg.IntOpt('page_size', default=0, help='Maximum results per page; a value of zero ("0") ' 'disables paging.'), cfg.StrOpt('alias_dereferencing', default='default', choices=['never', 'searching', 'always', 'finding', 'default'], help='The LDAP dereferencing option for queries. The ' '"default" option falls back to using default ' 'dereferencing configured by your ldap.conf.'), cfg.IntOpt('debug_level', help='Sets the LDAP debugging level for LDAP calls. ' 'A value of 0 means that debugging is not enabled. ' 'This value is a bitmask, consult your LDAP ' 'documentation for possible values.'), cfg.BoolOpt('chase_referrals', help='Override the system\'s default referral chasing ' 'behavior for queries.'), cfg.StrOpt('user_tree_dn', help='Search base for users. ' 'Defaults to the suffix value.'), cfg.StrOpt('user_filter', help='LDAP search filter for users.'), cfg.StrOpt('user_objectclass', default='inetOrgPerson', help='LDAP objectclass for users.'), cfg.StrOpt('user_id_attribute', default='cn', help='LDAP attribute mapped to user id. 
' 'WARNING: must not be a multivalued attribute.'), cfg.StrOpt('user_name_attribute', default='sn', help='LDAP attribute mapped to user name.'), cfg.StrOpt('user_description_attribute', default='description', help='LDAP attribute mapped to user description.'), cfg.StrOpt('user_mail_attribute', default='mail', help='LDAP attribute mapped to user email.'), cfg.StrOpt('user_pass_attribute', default='userPassword', help='LDAP attribute mapped to password.'), cfg.StrOpt('user_enabled_attribute', default='enabled', help='LDAP attribute mapped to user enabled flag.'), cfg.BoolOpt('user_enabled_invert', default=False, help='Invert the meaning of the boolean enabled values. ' 'Some LDAP servers use a boolean lock attribute ' 'where "true" means an account is disabled. Setting ' '"user_enabled_invert = true" will allow these lock ' 'attributes to be used. This setting will have no ' 'effect if "user_enabled_mask" or ' '"user_enabled_emulation" settings are in use.'), cfg.IntOpt('user_enabled_mask', default=0, help='Bitmask integer to indicate the bit that the enabled ' 'value is stored in if the LDAP server represents ' '"enabled" as a bit on an integer rather than a ' 'boolean. A value of "0" indicates the mask is not ' 'used. If this is not set to "0" the typical value ' 'is "2". This is typically used when ' '"user_enabled_attribute = userAccountControl".'), cfg.StrOpt('user_enabled_default', default='True', help='Default value to enable users. This should match an ' 'appropriate int value if the LDAP server uses ' 'non-boolean (bitmask) values to indicate if a user ' 'is enabled or disabled. If this is not set to "True" ' 'the typical value is "512". 
This is typically used ' 'when "user_enabled_attribute = userAccountControl".'), cfg.ListOpt('user_attribute_ignore', default=['default_project_id'], help='List of attributes stripped off the user on ' 'update.'), cfg.StrOpt('user_default_project_id_attribute', help='LDAP attribute mapped to default_project_id for ' 'users.'), cfg.BoolOpt('user_allow_create', default=True, deprecated_for_removal=True, deprecated_reason="Write support for Identity LDAP " "backends has been deprecated in the M " "release and will be removed in the O " "release.", help='Allow user creation in LDAP backend.'), cfg.BoolOpt('user_allow_update', default=True, deprecated_for_removal=True, deprecated_reason="Write support for Identity LDAP " "backends has been deprecated in the M " "release and will be removed in the O " "release.", help='Allow user updates in LDAP backend.'), cfg.BoolOpt('user_allow_delete', default=True, deprecated_for_removal=True, deprecated_reason="Write support for Identity LDAP " "backends has been deprecated in the M " "release and will be removed in the O " "release.", help='Allow user deletion in LDAP backend.'), cfg.BoolOpt('user_enabled_emulation', default=False, help='If true, Keystone uses an alternative method to ' 'determine if a user is enabled or not by checking ' 'if they are a member of the ' '"user_enabled_emulation_dn" group.'), cfg.StrOpt('user_enabled_emulation_dn', help='DN of the group entry to hold enabled users when ' 'using enabled emulation.'), cfg.BoolOpt('user_enabled_emulation_use_group_config', default=False, help='Use the "group_member_attribute" and ' '"group_objectclass" settings to determine ' 'membership in the emulated enabled group.'), cfg.ListOpt('user_additional_attribute_mapping', default=[], help='List of additional LDAP attributes used for mapping ' 'additional attribute mappings for users. 
Attribute ' 'mapping format is :, where ' 'ldap_attr is the attribute in the LDAP entry and ' 'user_attr is the Identity API attribute.'), cfg.StrOpt('group_tree_dn', help='Search base for groups. ' 'Defaults to the suffix value.'), cfg.StrOpt('group_filter', help='LDAP search filter for groups.'), cfg.StrOpt('group_objectclass', default='groupOfNames', help='LDAP objectclass for groups.'), cfg.StrOpt('group_id_attribute', default='cn', help='LDAP attribute mapped to group id.'), cfg.StrOpt('group_name_attribute', default='ou', help='LDAP attribute mapped to group name.'), cfg.StrOpt('group_member_attribute', default='member', help='LDAP attribute mapped to show group membership.'), cfg.StrOpt('group_desc_attribute', default='description', help='LDAP attribute mapped to group description.'), cfg.ListOpt('group_attribute_ignore', default=[], help='List of attributes stripped off the group on ' 'update.'), cfg.BoolOpt('group_allow_create', default=True, deprecated_for_removal=True, deprecated_reason="Write support for Identity LDAP " "backends has been deprecated in the M " "release and will be removed in the O " "release.", help='Allow group creation in LDAP backend.'), cfg.BoolOpt('group_allow_update', default=True, deprecated_for_removal=True, deprecated_reason="Write support for Identity LDAP " "backends has been deprecated in the M " "release and will be removed in the O " "release.", help='Allow group update in LDAP backend.'), cfg.BoolOpt('group_allow_delete', default=True, deprecated_for_removal=True, deprecated_reason="Write support for Identity LDAP " "backends has been deprecated in the M " "release and will be removed in the O " "release.", help='Allow group deletion in LDAP backend.'), cfg.ListOpt('group_additional_attribute_mapping', default=[], help='Additional attribute mappings for groups. 
Attribute ' 'mapping format is :, where ' 'ldap_attr is the attribute in the LDAP entry and ' 'user_attr is the Identity API attribute.'), cfg.StrOpt('tls_cacertfile', help='CA certificate file path for communicating with ' 'LDAP servers.'), cfg.StrOpt('tls_cacertdir', help='CA certificate directory path for communicating with ' 'LDAP servers.'), cfg.BoolOpt('use_tls', default=False, help='Enable TLS for communicating with LDAP servers.'), cfg.StrOpt('tls_req_cert', default='demand', choices=['demand', 'never', 'allow'], help='Specifies what checks to perform on client ' 'certificates in an incoming TLS session.'), cfg.BoolOpt('use_pool', default=True, help='Enable LDAP connection pooling.'), cfg.IntOpt('pool_size', default=10, help='Connection pool size.'), cfg.IntOpt('pool_retry_max', default=3, help='Maximum count of reconnect trials.'), cfg.FloatOpt('pool_retry_delay', default=0.1, help='Time span in seconds to wait between two ' 'reconnect trials.'), cfg.IntOpt('pool_connection_timeout', default=-1, help='Connector timeout in seconds. Value -1 indicates ' 'indefinite wait for response.'), cfg.IntOpt('pool_connection_lifetime', default=600, help='Connection lifetime in seconds.'), cfg.BoolOpt('use_auth_pool', default=True, help='Enable LDAP connection pooling for end user ' 'authentication. If use_pool is disabled, then this ' 'setting is meaningless and is not used at all.'), cfg.IntOpt('auth_pool_size', default=100, help='End user auth connection pool size.'), cfg.IntOpt('auth_pool_connection_lifetime', default=60, help='End user auth connection lifetime in seconds.'), cfg.BoolOpt('group_members_are_ids', default=False, help='If the members of the group objectclass are user ' 'IDs rather than DNs, set this to true. 
This is the ' 'case when using posixGroup as the group ' 'objectclass and OpenDirectory.'), ], 'auth': [ cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS, help='Allowed authentication methods.'), cfg.StrOpt('password', # nosec : This is the name of the plugin, not # a password that needs to be protected. help='Entrypoint for the password auth plugin module in ' 'the keystone.auth.password namespace.'), cfg.StrOpt('token', help='Entrypoint for the token auth plugin module in the ' 'keystone.auth.token namespace.'), # deals with REMOTE_USER authentication cfg.StrOpt('external', help='Entrypoint for the external (REMOTE_USER) auth ' 'plugin module in the keystone.auth.external ' 'namespace. Supplied drivers are DefaultDomain and ' 'Domain. The default driver is DefaultDomain.'), cfg.StrOpt('oauth1', help='Entrypoint for the oAuth1.0 auth plugin module in ' 'the keystone.auth.oauth1 namespace.'), ], 'tokenless_auth': [ cfg.MultiStrOpt('trusted_issuer', default=[], help='The list of trusted issuers to further filter ' 'the certificates that are allowed to ' 'participate in the X.509 tokenless ' 'authorization. If the option is absent then ' 'no certificates will be allowed. ' 'The naming format for the attributes of a ' 'Distinguished Name(DN) must be separated by a ' 'comma and contain no spaces. This configuration ' 'option may be repeated for multiple values. ' 'For example: ' 'trusted_issuer=CN=john,OU=keystone,O=openstack ' 'trusted_issuer=CN=mary,OU=eng,O=abc'), cfg.StrOpt('protocol', default='x509', help='The protocol name for the X.509 tokenless ' 'authorization along with the option issuer_attribute ' 'below can look up its corresponding mapping.'), cfg.StrOpt('issuer_attribute', default='SSL_CLIENT_I_DN', help='The issuer attribute that is served as an IdP ID ' 'for the X.509 tokenless authorization along with ' 'the protocol to look up its corresponding mapping. 
' 'It is the environment variable in the WSGI ' 'environment that references to the issuer of the ' 'client certificate.'), ], 'paste_deploy': [ cfg.StrOpt('config_file', default='keystone-paste.ini', help='Name of the paste configuration file that defines ' 'the available pipelines.'), ], 'memcache': [ cfg.ListOpt('servers', default=['localhost:11211'], help='Memcache servers in the format of "host:port".'), cfg.IntOpt('dead_retry', default=5 * 60, help='Number of seconds memcached server is considered dead' ' before it is tried again. This is used by the key ' 'value store system (e.g. token ' 'pooled memcached persistence backend).'), cfg.IntOpt('socket_timeout', default=3, help='Timeout in seconds for every call to a server. This ' 'is used by the key value store system (e.g. token ' 'pooled memcached persistence backend).'), cfg.IntOpt('pool_maxsize', default=10, help='Max total number of open connections to every' ' memcached server. This is used by the key value ' 'store system (e.g. token pooled memcached ' 'persistence backend).'), cfg.IntOpt('pool_unused_timeout', default=60, help='Number of seconds a connection to memcached is held' ' unused in the pool before it is closed. This is used' ' by the key value store system (e.g. token pooled ' 'memcached persistence backend).'), cfg.IntOpt('pool_connection_get_timeout', default=10, help='Number of seconds that an operation will wait to get ' 'a memcache client connection. This is used by the ' 'key value store system (e.g. token pooled memcached ' 'persistence backend).'), ], 'catalog': [ cfg.StrOpt('template_file', default='default_catalog.templates', help='Catalog template file name for use with the ' 'template catalog backend.'), cfg.StrOpt('driver', default='sql', help='Entrypoint for the catalog backend driver in the ' 'keystone.catalog namespace. Supplied drivers are ' 'kvs, sql, templated, and endpoint_filter.sql'), cfg.BoolOpt('caching', default=True, help='Toggle for catalog caching. 
This has no ' 'effect unless global caching is enabled.'), cfg.IntOpt('cache_time', help='Time to cache catalog data (in seconds). This has no ' 'effect unless global and catalog caching are ' 'enabled.'), cfg.IntOpt('list_limit', help='Maximum number of entities that will be returned ' 'in a catalog collection.'), ], 'kvs': [ cfg.ListOpt('backends', default=[], help='Extra dogpile.cache backend modules to register ' 'with the dogpile.cache library.'), cfg.StrOpt('config_prefix', default='keystone.kvs', help='Prefix for building the configuration dictionary ' 'for the KVS region. This should not need to be ' 'changed unless there is another dogpile.cache ' 'region with the same configuration name.'), cfg.BoolOpt('enable_key_mangler', default=True, help='Toggle to disable using a key-mangling function ' 'to ensure fixed length keys. This is toggle-able ' 'for debugging purposes, it is highly recommended ' 'to always leave this set to true.'), cfg.IntOpt('default_lock_timeout', default=5, help='Default lock timeout (in seconds) for distributed ' 'locking.'), ], 'saml': [ cfg.IntOpt('assertion_expiration_time', default=3600, help='Default TTL, in seconds, for any generated SAML ' 'assertion created by Keystone.'), cfg.StrOpt('xmlsec1_binary', default='xmlsec1', help='Binary to be called for XML signing. Install the ' 'appropriate package, specify absolute path or adjust ' 'your PATH environment variable if the binary cannot ' 'be found.'), cfg.StrOpt('certfile', default=_CERTFILE, help='Path of the certfile for SAML signing. For ' 'non-production environments, you may be interested ' 'in using `keystone-manage pki_setup` to generate ' 'self-signed certificates. Note, the path cannot ' 'contain a comma.'), cfg.StrOpt('keyfile', default=_KEYFILE, help='Path of the keyfile for SAML signing. Note, the path ' 'cannot contain a comma.'), cfg.StrOpt('idp_entity_id', help='Entity ID value for unique Identity Provider ' 'identification. Usually FQDN is set with a suffix. 
' 'A value is required to generate IDP Metadata. ' 'For example: https://keystone.example.com/v3/' 'OS-FEDERATION/saml2/idp'), cfg.StrOpt('idp_sso_endpoint', help='Identity Provider Single-Sign-On service value, ' 'required in the Identity Provider\'s metadata. ' 'A value is required to generate IDP Metadata. ' 'For example: https://keystone.example.com/v3/' 'OS-FEDERATION/saml2/sso'), cfg.StrOpt('idp_lang', default='en', help='Language used by the organization.'), cfg.StrOpt('idp_organization_name', help='Organization name the installation belongs to.'), cfg.StrOpt('idp_organization_display_name', help='Organization name to be displayed.'), cfg.StrOpt('idp_organization_url', help='URL of the organization.'), cfg.StrOpt('idp_contact_company', help='Company of contact person.'), cfg.StrOpt('idp_contact_name', help='Given name of contact person'), cfg.StrOpt('idp_contact_surname', help='Surname of contact person.'), cfg.StrOpt('idp_contact_email', help='Email address of contact person.'), cfg.StrOpt('idp_contact_telephone', help='Telephone number of contact person.'), cfg.StrOpt('idp_contact_type', default='other', choices=['technical', 'support', 'administrative', 'billing', 'other'], help='The contact type describing the main point of ' 'contact for the identity provider.'), cfg.StrOpt('idp_metadata_path', default='/etc/keystone/saml2_idp_metadata.xml', help='Path to the Identity Provider Metadata file. ' 'This file should be generated with the ' 'keystone-manage saml_idp_metadata command.'), cfg.StrOpt('relay_state_prefix', default='ss:mem:', help='The prefix to use for the RelayState SAML ' 'attribute, used when generating ECP wrapped ' 'assertions.'), ], 'eventlet_server': [ cfg.IntOpt('public_workers', deprecated_name='public_workers', deprecated_group='DEFAULT', deprecated_for_removal=True, help='The number of worker processes to serve the public ' 'eventlet application. 
Defaults to number of CPUs ' '(minimum of 2).'), cfg.IntOpt('admin_workers', deprecated_name='admin_workers', deprecated_group='DEFAULT', deprecated_for_removal=True, help='The number of worker processes to serve the admin ' 'eventlet application. Defaults to number of CPUs ' '(minimum of 2).'), cfg.StrOpt('public_bind_host', default='0.0.0.0', # nosec : Bind to all interfaces by # default for backwards compatibility. deprecated_opts=[cfg.DeprecatedOpt('bind_host', group='DEFAULT'), cfg.DeprecatedOpt('public_bind_host', group='DEFAULT'), ], deprecated_for_removal=True, help='The IP address of the network interface for the ' 'public service to listen on.'), cfg.PortOpt('public_port', default=5000, deprecated_name='public_port', deprecated_group='DEFAULT', deprecated_for_removal=True, help='The port number which the public service listens ' 'on.'), cfg.StrOpt('admin_bind_host', default='0.0.0.0', # nosec : Bind to all interfaces by # default for backwards compatibility. deprecated_opts=[cfg.DeprecatedOpt('bind_host', group='DEFAULT'), cfg.DeprecatedOpt('admin_bind_host', group='DEFAULT')], deprecated_for_removal=True, help='The IP address of the network interface for the ' 'admin service to listen on.'), cfg.PortOpt('admin_port', default=35357, deprecated_name='admin_port', deprecated_group='DEFAULT', deprecated_for_removal=True, help='The port number which the admin service listens ' 'on.'), cfg.BoolOpt('wsgi_keep_alive', default=True, help='If set to false, disables keepalives on the server; ' 'all connections will be closed after serving one ' 'request.'), cfg.IntOpt('client_socket_timeout', default=900, help='Timeout for socket operations on a client ' 'connection. If an incoming connection is idle for ' 'this number of seconds it will be closed. 
A value ' 'of "0" means wait forever.'), cfg.BoolOpt('tcp_keepalive', default=False, deprecated_name='tcp_keepalive', deprecated_group='DEFAULT', deprecated_for_removal=True, help='Set this to true if you want to enable ' 'TCP_KEEPALIVE on server sockets, i.e. sockets used ' 'by the Keystone wsgi server for client ' 'connections.'), cfg.IntOpt('tcp_keepidle', default=600, deprecated_name='tcp_keepidle', deprecated_group='DEFAULT', deprecated_for_removal=True, help='Sets the value of TCP_KEEPIDLE in seconds for each ' 'server socket. Only applies if tcp_keepalive is ' 'true. Ignored if system does not support it.'), ], 'eventlet_server_ssl': [ cfg.BoolOpt('enable', default=False, deprecated_name='enable', deprecated_group='ssl', deprecated_for_removal=True, help='Toggle for SSL support on the Keystone ' 'eventlet servers.'), cfg.StrOpt('certfile', default='/etc/keystone/ssl/certs/keystone.pem', deprecated_name='certfile', deprecated_group='ssl', deprecated_for_removal=True, help='Path of the certfile for SSL. 
For non-production ' 'environments, you may be interested in using ' '`keystone-manage ssl_setup` to generate self-signed ' 'certificates.'), cfg.StrOpt('keyfile', default='/etc/keystone/ssl/private/keystonekey.pem', deprecated_name='keyfile', deprecated_group='ssl', deprecated_for_removal=True, help='Path of the keyfile for SSL.'), cfg.StrOpt('ca_certs', default='/etc/keystone/ssl/certs/ca.pem', deprecated_name='ca_certs', deprecated_group='ssl', deprecated_for_removal=True, help='Path of the CA cert file for SSL.'), cfg.BoolOpt('cert_required', default=False, deprecated_name='cert_required', deprecated_group='ssl', deprecated_for_removal=True, help='Require client certificate.'), ], } CONF = cfg.CONF oslo_messaging.set_transport_defaults(control_exchange='keystone') def _register_auth_plugin_opt(conf, option): conf.register_opt(option, group='auth') def setup_authentication(conf=None): # register any non-default auth methods here (used by extensions, etc) if conf is None: conf = CONF for method_name in conf.auth.methods: if method_name not in _DEFAULT_AUTH_METHODS: option = cfg.StrOpt(method_name) _register_auth_plugin_opt(conf, option) def set_default_for_default_log_levels(): """Set the default for the default_log_levels option for keystone. Keystone uses some packages that other OpenStack services don't use that do logging. This will set the default_log_levels default level for those packages. This function needs to be called before CONF(). """ extra_log_level_defaults = [ 'dogpile=INFO', 'routes=INFO', ] log.register_options(CONF) log.set_defaults(default_log_levels=log.get_default_log_levels() + extra_log_level_defaults) def setup_logging(): """Sets up logging for the keystone package.""" log.setup(CONF, 'keystone') logging.captureWarnings(True) def find_paste_config(): """Find Keystone's paste.deploy configuration file. 
Keystone's paste.deploy configuration file is specified in the ``[paste_deploy]`` section of the main Keystone configuration file, ``keystone.conf``. For example:: [paste_deploy] config_file = keystone-paste.ini :returns: The selected configuration filename :raises: exception.ConfigFileNotFound """ if CONF.paste_deploy.config_file: paste_config = CONF.paste_deploy.config_file paste_config_value = paste_config if not os.path.isabs(paste_config): paste_config = CONF.find_file(paste_config) elif CONF.config_file: paste_config = CONF.config_file[0] paste_config_value = paste_config else: # this provides backwards compatibility for keystone.conf files that # still have the entire paste configuration included, rather than just # a [paste_deploy] configuration section referring to an external file paste_config = CONF.find_file('keystone.conf') paste_config_value = 'keystone.conf' if not paste_config or not os.path.exists(paste_config): raise exception.ConfigFileNotFound(config_file=paste_config_value) return paste_config def configure(conf=None): if conf is None: conf = CONF conf.register_cli_opt( cfg.BoolOpt('standard-threads', default=False, help='Do not monkey-patch threading system modules.')) conf.register_cli_opt( cfg.StrOpt('pydev-debug-host', help='Host to connect to for remote debugger.')) conf.register_cli_opt( cfg.PortOpt('pydev-debug-port', help='Port to connect to for remote debugger.')) for section in FILE_OPTIONS: for option in FILE_OPTIONS[section]: if section: conf.register_opt(option, group=section) else: conf.register_opt(option) # register any non-default auth methods here (used by extensions, etc) setup_authentication(conf) # add oslo.cache related config options cache.configure(conf) def list_opts(): """Return a list of oslo_config options available in Keystone. The returned list includes all oslo_config options which are registered as the "FILE_OPTIONS" in keystone.common.config. 
This list will not include the options from the oslo-incubator library or any options registered dynamically at run time. Each object in the list is a two element tuple. The first element of each tuple is the name of the group under which the list of options in the second element will be registered. A group name of None corresponds to the [DEFAULT] group in config files. This function is also discoverable via the 'oslo_config.opts' entry point under the 'keystone.config.opts' namespace. The purpose of this is to allow tools like the Oslo sample config file generator to discover the options exposed to users by this library. :returns: a list of (group_name, opts) tuples """ return list(FILE_OPTIONS.items()) def set_middleware_defaults(): """Update default configuration options for oslo.middleware.""" # CORS Defaults # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ cfg.set_defaults(cors.CORS_OPTS, allow_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Subject-Token', 'X-Project-Id', 'X-Project-Name', 'X-Project-Domain-Id', 'X-Project-Domain-Name', 'X-Domain-Id', 'X-Domain-Name'], expose_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Subject-Token'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) def set_config_defaults(): """Override all configuration default values for keystone.""" set_default_for_default_log_levels() set_middleware_defaults() keystone-9.0.0/keystone/common/authorization.py0000664000567000056710000001074112701407102023122 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 - 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from keystone import exception from keystone.i18n import _, _LW from keystone.models import token_model AUTH_CONTEXT_ENV = 'KEYSTONE_AUTH_CONTEXT' """Environment variable used to convey the Keystone auth context. Auth context is essentially the user credential used for policy enforcement. It is a dictionary with the following attributes: * ``token``: Token from the request * ``user_id``: user ID of the principal * ``user_domain_id`` (optional): Domain ID of the principal if the principal has a domain. * ``project_id`` (optional): project ID of the scoped project if auth is project-scoped * ``project_domain_id`` (optional): Domain ID of the scoped project if auth is project-scoped. 
* ``domain_id`` (optional): domain ID of the scoped domain if auth is domain-scoped * ``domain_name`` (optional): domain name of the scoped domain if auth is domain-scoped * ``is_delegated_auth``: True if this is delegated (via trust or oauth) * ``trust_id``: Trust ID if trust-scoped, or None * ``trustor_id``: Trustor ID if trust-scoped, or None * ``trustee_id``: Trustee ID if trust-scoped, or None * ``consumer_id``: OAuth consumer ID, or None * ``access_token_id``: OAuth access token ID, or None * ``roles`` (optional): list of role names for the given scope * ``group_ids`` (optional): list of group IDs for which the API user has membership if token was for a federated user """ LOG = log.getLogger(__name__) def token_to_auth_context(token): if not isinstance(token, token_model.KeystoneToken): raise exception.UnexpectedError(_('token reference must be a ' 'KeystoneToken type, got: %s') % type(token)) auth_context = {'token': token, 'is_delegated_auth': False} try: auth_context['user_id'] = token.user_id except KeyError: LOG.warning(_LW('RBAC: Invalid user data in token')) raise exception.Unauthorized() auth_context['user_domain_id'] = token.user_domain_id if token.project_scoped: auth_context['project_id'] = token.project_id auth_context['project_domain_id'] = token.project_domain_id elif token.domain_scoped: auth_context['domain_id'] = token.domain_id auth_context['domain_name'] = token.domain_name else: LOG.debug('RBAC: Proceeding without project or domain scope') if token.trust_scoped: auth_context['is_delegated_auth'] = True auth_context['trust_id'] = token.trust_id auth_context['trustor_id'] = token.trustor_user_id auth_context['trustee_id'] = token.trustee_user_id else: # NOTE(lbragstad): These variables will already be set to None but we # add the else statement here for readability. 
auth_context['trust_id'] = None auth_context['trustor_id'] = None auth_context['trustee_id'] = None roles = token.role_names if roles: auth_context['roles'] = roles if token.oauth_scoped: auth_context['is_delegated_auth'] = True auth_context['consumer_id'] = token.oauth_consumer_id auth_context['access_token_id'] = token.oauth_access_token_id else: # NOTE(lbragstad): These variables will already be set to None but we # add the else statement here for readability. auth_context['consumer_id'] = None auth_context['access_token_id'] = None if token.is_federated_user: auth_context['group_ids'] = token.federation_group_ids return auth_context keystone-9.0.0/keystone/common/json_home.py0000664000567000056710000000624612701407102022210 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone import exception from keystone.i18n import _ def build_v3_resource_relation(resource_name): return ('http://docs.openstack.org/api/openstack-identity/3/rel/%s' % resource_name) def build_v3_extension_resource_relation(extension_name, extension_version, resource_name): return ( 'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/rel/%s' % (extension_name, extension_version, resource_name)) def build_v3_parameter_relation(parameter_name): return ('http://docs.openstack.org/api/openstack-identity/3/param/%s' % parameter_name) def build_v3_extension_parameter_relation(extension_name, extension_version, parameter_name): return ( 'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/param/' '%s' % (extension_name, extension_version, parameter_name)) class Parameters(object): """Relationships for Common parameters.""" DOMAIN_ID = build_v3_parameter_relation('domain_id') ENDPOINT_ID = build_v3_parameter_relation('endpoint_id') GROUP_ID = build_v3_parameter_relation('group_id') POLICY_ID = build_v3_parameter_relation('policy_id') PROJECT_ID = build_v3_parameter_relation('project_id') REGION_ID = build_v3_parameter_relation('region_id') ROLE_ID = build_v3_parameter_relation('role_id') SERVICE_ID = build_v3_parameter_relation('service_id') USER_ID = build_v3_parameter_relation('user_id') class Status(object): """Status values supported.""" DEPRECATED = 'deprecated' EXPERIMENTAL = 'experimental' STABLE = 'stable' @classmethod def update_resource_data(cls, resource_data, status): if status is cls.STABLE: # We currently do not add a status if the resource is stable, the # absence of the status property can be taken as meaning that the # resource is stable. 
return if status is cls.DEPRECATED or status is cls.EXPERIMENTAL: resource_data['hints'] = {'status': status} return raise exception.Error(message=_( 'Unexpected status requested for JSON Home response, %s') % status) def translate_urls(json_home, new_prefix): """Given a JSON Home document, sticks new_prefix on each of the urls.""" for dummy_rel, resource in json_home['resources'].items(): if 'href' in resource: resource['href'] = new_prefix + resource['href'] elif 'href-template' in resource: resource['href-template'] = new_prefix + resource['href-template'] keystone-9.0.0/keystone/common/openssl.py0000664000567000056710000003271012701407102021705 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import os from oslo_config import cfg from oslo_log import log from keystone.common import environment from keystone.common import utils from keystone.i18n import _LI, _LE, _LW LOG = log.getLogger(__name__) CONF = cfg.CONF PUBLIC_DIR_PERMS = 0o755 # -rwxr-xr-x PRIVATE_DIR_PERMS = 0o750 # -rwxr-x--- PUBLIC_FILE_PERMS = 0o644 # -rw-r--r-- PRIVATE_FILE_PERMS = 0o640 # -rw-r----- def file_exists(file_path): return os.path.exists(file_path) class BaseCertificateConfigure(object): """Create a certificate signing environment. This is based on a config section and reasonable OpenSSL defaults. 
""" def __init__(self, conf_obj, server_conf_obj, keystone_user, keystone_group, rebuild, **kwargs): self.conf_dir = os.path.dirname(server_conf_obj.ca_certs) self.use_keystone_user = keystone_user self.use_keystone_group = keystone_group self.rebuild = rebuild self.ssl_config_file_name = os.path.join(self.conf_dir, "openssl.conf") self.request_file_name = os.path.join(self.conf_dir, "req.pem") self.ssl_dictionary = {'conf_dir': self.conf_dir, 'ca_cert': server_conf_obj.ca_certs, 'default_md': 'default', 'ssl_config': self.ssl_config_file_name, 'ca_private_key': conf_obj.ca_key, 'request_file': self.request_file_name, 'signing_key': server_conf_obj.keyfile, 'signing_cert': server_conf_obj.certfile, 'key_size': int(conf_obj.key_size), 'valid_days': int(conf_obj.valid_days), 'cert_subject': conf_obj.cert_subject} try: # OpenSSL 1.0 and newer support default_md = default, # older versions do not openssl_ver = environment.subprocess.check_output( # the arguments # are hardcoded and just check the openssl version ['openssl', 'version']) if b'OpenSSL 0.' in openssl_ver: self.ssl_dictionary['default_md'] = 'sha1' except environment.subprocess.CalledProcessError: LOG.warning(_LW('Failed to invoke ``openssl version``, ' 'assuming is v1.0 or newer')) self.ssl_dictionary.update(kwargs) def exec_command(self, command): to_exec = [part % self.ssl_dictionary for part in command] LOG.info(_LI('Running command - %s'), ' '.join(to_exec)) try: # NOTE(shaleh): use check_output instead of the simpler # `check_call()` in order to log any output from an error. 
environment.subprocess.check_output( # the arguments being passed # in are defined in this file and trusted to build CAs, keys # and certs to_exec, stderr=environment.subprocess.STDOUT) except environment.subprocess.CalledProcessError as e: LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s ' '- %(output)s'), {'to_exec': to_exec, 'retcode': e.returncode, 'output': e.output}) raise e def clean_up_existing_files(self): files_to_clean = [self.ssl_dictionary['ca_private_key'], self.ssl_dictionary['ca_cert'], self.ssl_dictionary['signing_key'], self.ssl_dictionary['signing_cert'], ] existing_files = [] for file_path in files_to_clean: if file_exists(file_path): if self.rebuild: # The file exists but the user wants to rebuild it, so blow # it away try: os.remove(file_path) except OSError as exc: LOG.error(_LE('Failed to remove file %(file_path)r: ' '%(error)s'), {'file_path': file_path, 'error': exc.strerror}) raise else: existing_files.append(file_path) return existing_files def build_ssl_config_file(self): utils.make_dirs(os.path.dirname(self.ssl_config_file_name), mode=PUBLIC_DIR_PERMS, user=self.use_keystone_user, group=self.use_keystone_group, log=LOG) if not file_exists(self.ssl_config_file_name): with open(self.ssl_config_file_name, 'w') as ssl_config_file: ssl_config_file.write(self.sslconfig % self.ssl_dictionary) utils.set_permissions(self.ssl_config_file_name, mode=PRIVATE_FILE_PERMS, user=self.use_keystone_user, group=self.use_keystone_group, log=LOG) index_file_name = os.path.join(self.conf_dir, 'index.txt') if not file_exists(index_file_name): with open(index_file_name, 'w') as index_file: index_file.write('') utils.set_permissions(index_file_name, mode=PRIVATE_FILE_PERMS, user=self.use_keystone_user, group=self.use_keystone_group, log=LOG) serial_file_name = os.path.join(self.conf_dir, 'serial') if not file_exists(serial_file_name): with open(serial_file_name, 'w') as index_file: index_file.write('01') utils.set_permissions(serial_file_name, 
mode=PRIVATE_FILE_PERMS, user=self.use_keystone_user, group=self.use_keystone_group, log=LOG) def build_ca_cert(self): ca_key_file = self.ssl_dictionary['ca_private_key'] utils.make_dirs(os.path.dirname(ca_key_file), mode=PRIVATE_DIR_PERMS, user=self.use_keystone_user, group=self.use_keystone_group, log=LOG) if not file_exists(ca_key_file): self.exec_command(['openssl', 'genrsa', '-out', '%(ca_private_key)s', '%(key_size)d']) utils.set_permissions(ca_key_file, mode=PRIVATE_FILE_PERMS, user=self.use_keystone_user, group=self.use_keystone_group, log=LOG) ca_cert = self.ssl_dictionary['ca_cert'] utils.make_dirs(os.path.dirname(ca_cert), mode=PUBLIC_DIR_PERMS, user=self.use_keystone_user, group=self.use_keystone_group, log=LOG) if not file_exists(ca_cert): self.exec_command(['openssl', 'req', '-new', '-x509', '-extensions', 'v3_ca', '-key', '%(ca_private_key)s', '-out', '%(ca_cert)s', '-days', '%(valid_days)d', '-config', '%(ssl_config)s', '-subj', '%(cert_subject)s']) utils.set_permissions(ca_cert, mode=PUBLIC_FILE_PERMS, user=self.use_keystone_user, group=self.use_keystone_group, log=LOG) def build_private_key(self): signing_keyfile = self.ssl_dictionary['signing_key'] utils.make_dirs(os.path.dirname(signing_keyfile), mode=PRIVATE_DIR_PERMS, user=self.use_keystone_user, group=self.use_keystone_group, log=LOG) if not file_exists(signing_keyfile): self.exec_command(['openssl', 'genrsa', '-out', '%(signing_key)s', '%(key_size)d']) utils.set_permissions(signing_keyfile, mode=PRIVATE_FILE_PERMS, user=self.use_keystone_user, group=self.use_keystone_group, log=LOG) def build_signing_cert(self): signing_cert = self.ssl_dictionary['signing_cert'] utils.make_dirs(os.path.dirname(signing_cert), mode=PUBLIC_DIR_PERMS, user=self.use_keystone_user, group=self.use_keystone_group, log=LOG) if not file_exists(signing_cert): self.exec_command(['openssl', 'req', '-key', '%(signing_key)s', '-new', '-out', '%(request_file)s', '-config', '%(ssl_config)s', '-subj', '%(cert_subject)s']) 
self.exec_command(['openssl', 'ca', '-batch', '-out', '%(signing_cert)s', '-config', '%(ssl_config)s', '-days', '%(valid_days)dd', '-cert', '%(ca_cert)s', '-keyfile', '%(ca_private_key)s', '-infiles', '%(request_file)s']) def run(self): try: existing_files = self.clean_up_existing_files() except OSError: print('An error occurred when rebuilding cert files.') return if existing_files: print('The following cert files already exist, use --rebuild to ' 'remove the existing files before regenerating:') for f in existing_files: print('%s already exists' % f) return self.build_ssl_config_file() self.build_ca_cert() self.build_private_key() self.build_signing_cert() class ConfigurePKI(BaseCertificateConfigure): """Generate files for PKI signing using OpenSSL. Signed tokens require a private key and signing certificate which itself must be signed by a CA. This class generates them with workable defaults if each of the files are not present """ def __init__(self, keystone_user, keystone_group, rebuild=False): super(ConfigurePKI, self).__init__(CONF.signing, CONF.signing, keystone_user, keystone_group, rebuild=rebuild) class ConfigureSSL(BaseCertificateConfigure): """Generate files for HTTPS using OpenSSL. Creates a public/private key and certificates. If a CA is not given one will be generated using provided arguments. """ def __init__(self, keystone_user, keystone_group, rebuild=False): super(ConfigureSSL, self).__init__(CONF.ssl, CONF.eventlet_server_ssl, keystone_user, keystone_group, rebuild=rebuild) BaseCertificateConfigure.sslconfig = """ # OpenSSL configuration file. # # Establish working directory. 
dir = %(conf_dir)s [ ca ] default_ca = CA_default [ CA_default ] new_certs_dir = $dir serial = $dir/serial database = $dir/index.txt default_days = 365 default_md = %(default_md)s preserve = no email_in_dn = no nameopt = default_ca certopt = default_ca policy = policy_anything x509_extensions = usr_cert unique_subject = no [ policy_anything ] countryName = optional stateOrProvinceName = optional organizationName = optional organizationalUnitName = optional commonName = supplied emailAddress = optional [ req ] default_bits = 2048 # Size of keys default_keyfile = key.pem # name of generated keys string_mask = utf8only # permitted characters distinguished_name = req_distinguished_name req_extensions = v3_req x509_extensions = v3_ca [ req_distinguished_name ] countryName = Country Name (2 letter code) countryName_min = 2 countryName_max = 2 stateOrProvinceName = State or Province Name (full name) localityName = Locality Name (city, district) 0.organizationName = Organization Name (company) organizationalUnitName = Organizational Unit Name (department, division) commonName = Common Name (hostname, IP, or your name) commonName_max = 64 emailAddress = Email Address emailAddress_max = 64 [ v3_ca ] basicConstraints = CA:TRUE subjectKeyIdentifier = hash authorityKeyIdentifier = keyid:always,issuer [ v3_req ] basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment [ usr_cert ] basicConstraints = CA:FALSE subjectKeyIdentifier = hash authorityKeyIdentifier = keyid:always """ keystone-9.0.0/keystone/common/sql/0000775000567000056710000000000012701407246020455 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/sql/migrate_repo/0000775000567000056710000000000012701407246023132 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/sql/migrate_repo/README0000664000567000056710000000017212701407102024001 0ustar jenkinsjenkins00000000000000This is a database migration repository. 
More information at https://git.openstack.org/cgit/openstack/sqlalchemy-migrate keystone-9.0.0/keystone/common/sql/migrate_repo/versions/0000775000567000056710000000000012701407246025002 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py0000664000567000056710000000500512701407102032564 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sql _PROJECT_TABLE_NAME = 'project' _DOMAIN_TABLE_NAME = 'domain' NULL_DOMAIN_ID = '<>' def upgrade(migrate_engine): def _generate_root_domain_project(): # Generate a project that will act as a root for all domains, in order # for use to be able to use a FK constraint on domain_id. Projects # acting as a domain will not reference this as their parent_id, just # as domain_id. # # This special project is filtered out by the driver, so is never # visible to the manager or API. project_ref = { 'id': NULL_DOMAIN_ID, 'name': NULL_DOMAIN_ID, 'enabled': False, 'description': '', 'domain_id': NULL_DOMAIN_ID, 'is_domain': True, 'parent_id': None, 'extra': '{}' } return project_ref def _generate_root_domain(): # Generate a similar root for the domain table, this is an interim # step so as to allow continuation of current project domain_id FK. # # This special domain is filtered out by the driver, so is never # visible to the manager or API. 
domain_ref = { 'id': NULL_DOMAIN_ID, 'name': NULL_DOMAIN_ID, 'enabled': False, 'extra': '{}' } return domain_ref meta = sql.MetaData() meta.bind = migrate_engine session = sql.orm.sessionmaker(bind=migrate_engine)() project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True) domain_table = sql.Table(_DOMAIN_TABLE_NAME, meta, autoload=True) root_domain = _generate_root_domain() new_entry = domain_table.insert().values(**root_domain) session.execute(new_entry) session.commit() root_domain_project = _generate_root_domain_project() new_entry = project_table.insert().values(**root_domain_project) session.execute(new_entry) session.commit() session.close() keystone-9.0.0/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py0000664000567000056710000000433212701407102032264 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import migrate import sqlalchemy as sql _ROLE_NAME_NEW_CONSTRAINT = 'ixu_role_name_domain_id' _ROLE_TABLE_NAME = 'role' _ROLE_NAME_COLUMN_NAME = 'name' _DOMAIN_ID_COLUMN_NAME = 'domain_id' _NULL_DOMAIN_ID = '<>' def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine role_table = sql.Table(_ROLE_TABLE_NAME, meta, autoload=True) domain_id = sql.Column(_DOMAIN_ID_COLUMN_NAME, sql.String(64), nullable=False, server_default=_NULL_DOMAIN_ID) # NOTE(morganfainberg): the `role_name` unique constraint is not # guaranteed to be a fixed name, such as 'ixu_role_name`, so we need to # search for the correct constraint that only affects role_table.c.name # and drop that constraint. to_drop = None if migrate_engine.name == 'mysql': for c in role_table.indexes: if (c.unique and len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns): to_drop = c break else: for c in role_table.constraints: if len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns: to_drop = c break if to_drop is not None: migrate.UniqueConstraint(role_table.c.name, name=to_drop.name).drop() # perform changes after constraint is dropped. if 'domain_id' not in role_table.columns: # Only create the column if it doesn't already exist. role_table.create_column(domain_id) migrate.UniqueConstraint(role_table.c.name, role_table.c.domain_id, name=_ROLE_NAME_NEW_CONSTRAINT).create() keystone-9.0.0/keystone/common/sql/migrate_repo/versions/087_implied_roles.py0000664000567000056710000000275712701407102030603 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import migrate import sqlalchemy as sql ROLE_TABLE = 'role' def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine implied_role = sql.Table( 'implied_role', meta, sql.Column('prior_role_id', sql.String(length=64), primary_key=True), sql.Column( 'implied_role_id', sql.String(length=64), primary_key=True), mysql_engine='InnoDB', mysql_charset='utf8') implied_role.create() role = sql.Table(ROLE_TABLE, meta, autoload=True) fkeys = [ {'columns': [implied_role.c.prior_role_id], 'references': [role.c.id]}, {'columns': [implied_role.c.implied_role_id], 'references': [role.c.id]}, ] for fkey in fkeys: migrate.ForeignKeyConstraint(columns=fkey['columns'], refcolumns=fkey['references'], name=fkey.get('name')).create() keystone-9.0.0/keystone/common/sql/migrate_repo/versions/070_placeholder.py0000664000567000056710000000136412701407102030217 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Kilo backports. Do not use this number for new # Liberty work. New Liberty work starts after all the placeholders. 
def upgrade(migrate_engine): pass keystone-9.0.0/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py0000664000567000056710000000346212701407102032704 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import migrate import sqlalchemy as sql def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine user_table = sql.Table('user', meta, autoload=True) idp_table = sql.Table('identity_provider', meta, autoload=True) protocol_table = sql.Table('federation_protocol', meta, autoload=True) federated_table = sql.Table( 'federated_user', meta, sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column('user_id', sql.String(64), sql.ForeignKey(user_table.c.id, ondelete='CASCADE'), nullable=False), sql.Column('idp_id', sql.String(64), sql.ForeignKey(idp_table.c.id, ondelete='CASCADE'), nullable=False), sql.Column('protocol_id', sql.String(64), nullable=False), sql.Column('unique_id', sql.String(255), nullable=False), sql.Column('display_name', sql.String(255), nullable=True), sql.UniqueConstraint('idp_id', 'protocol_id', 'unique_id')) federated_table.create(migrate_engine, checkfirst=True) migrate.ForeignKeyConstraint( columns=[federated_table.c.protocol_id, federated_table.c.idp_id], refcolumns=[protocol_table.c.id, protocol_table.c.idp_id]).create() keystone-9.0.0/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py0000664000567000056710000000464312701407102033610 0ustar 
jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sql from keystone.common.sql import migration_helpers def upgrade(migrate_engine): try: extension_version = migration_helpers.get_db_version( extension='endpoint_filter', engine=migrate_engine) except Exception: extension_version = 0 # This migration corresponds to endpoint_filter extension migration 2. Only # update if it has not been run. if extension_version >= 2: return # Upgrade operations go here. Don't create your own engine; bind # migrate_engine to your metadata meta = sql.MetaData() meta.bind = migrate_engine EP_GROUP_ID = 'endpoint_group_id' PROJECT_ID = 'project_id' endpoint_filtering_table = sql.Table( 'project_endpoint', meta, sql.Column( 'endpoint_id', sql.String(64), primary_key=True, nullable=False), sql.Column( 'project_id', sql.String(64), primary_key=True, nullable=False)) endpoint_filtering_table.create(migrate_engine, checkfirst=True) endpoint_group_table = sql.Table( 'endpoint_group', meta, sql.Column('id', sql.String(64), primary_key=True), sql.Column('name', sql.String(255), nullable=False), sql.Column('description', sql.Text, nullable=True), sql.Column('filters', sql.Text(), nullable=False)) endpoint_group_table.create(migrate_engine, checkfirst=True) project_endpoint_group_table = sql.Table( 'project_endpoint_group', meta, sql.Column(EP_GROUP_ID, sql.String(64), sql.ForeignKey('endpoint_group.id'), nullable=False), sql.Column(PROJECT_ID, sql.String(64), 
nullable=False), sql.PrimaryKeyConstraint(EP_GROUP_ID, PROJECT_ID)) project_endpoint_group_table.create(migrate_engine, checkfirst=True) keystone-9.0.0/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py0000664000567000056710000000401512701407102031373 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sql from keystone.common.sql import migration_helpers def upgrade(migrate_engine): try: extension_version = migration_helpers.get_db_version( extension='revoke', engine=migrate_engine) except Exception: extension_version = 0 # This migration corresponds to revoke extension migration 2. Only # update if it has not been run. if extension_version >= 2: return # Upgrade operations go here. 
Don't create your own engine; bind # migrate_engine to your metadata meta = sql.MetaData() meta.bind = migrate_engine service_table = sql.Table( 'revocation_event', meta, sql.Column('id', sql.String(64), primary_key=True), sql.Column('domain_id', sql.String(64)), sql.Column('project_id', sql.String(64)), sql.Column('user_id', sql.String(64)), sql.Column('role_id', sql.String(64)), sql.Column('trust_id', sql.String(64)), sql.Column('consumer_id', sql.String(64)), sql.Column('access_token_id', sql.String(64)), sql.Column('issued_before', sql.DateTime(), nullable=False), sql.Column('expires_at', sql.DateTime()), sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False), sql.Column('audit_id', sql.String(32), nullable=True), sql.Column('audit_chain_id', sql.String(32), nullable=True)) service_table.create(migrate_engine, checkfirst=True) keystone-9.0.0/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py0000664000567000056710000000703412701407102032222 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import sqlalchemy as sql from keystone.common.sql import migration_helpers CONF = cfg.CONF _RELAY_STATE_PREFIX = 'relay_state_prefix' def upgrade(migrate_engine): try: extension_version = migration_helpers.get_db_version( extension='federation', engine=migrate_engine) except Exception: extension_version = 0 # This migration corresponds to federation extension migration 8. 
Only # update if it has not been run. if extension_version >= 8: return # Upgrade operations go here. Don't create your own engine; bind # migrate_engine to your metadata meta = sql.MetaData() meta.bind = migrate_engine idp_table = sql.Table( 'identity_provider', meta, sql.Column('id', sql.String(64), primary_key=True), sql.Column('enabled', sql.Boolean, nullable=False), sql.Column('description', sql.Text(), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8') idp_table.create(migrate_engine, checkfirst=True) federation_protocol_table = sql.Table( 'federation_protocol', meta, sql.Column('id', sql.String(64), primary_key=True), sql.Column('idp_id', sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE'), primary_key=True), sql.Column('mapping_id', sql.String(64), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8') federation_protocol_table.create(migrate_engine, checkfirst=True) mapping_table = sql.Table( 'mapping', meta, sql.Column('id', sql.String(64), primary_key=True), sql.Column('rules', sql.Text(), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8') mapping_table.create(migrate_engine, checkfirst=True) relay_state_prefix_default = CONF.saml.relay_state_prefix sp_table = sql.Table( 'service_provider', meta, sql.Column('auth_url', sql.String(256), nullable=False), sql.Column('id', sql.String(64), primary_key=True), sql.Column('enabled', sql.Boolean, nullable=False), sql.Column('description', sql.Text(), nullable=True), sql.Column('sp_url', sql.String(256), nullable=False), sql.Column(_RELAY_STATE_PREFIX, sql.String(256), nullable=False, server_default=relay_state_prefix_default), mysql_engine='InnoDB', mysql_charset='utf8') sp_table.create(migrate_engine, checkfirst=True) idp_table = sql.Table('identity_provider', meta, autoload=True) remote_id_table = sql.Table( 'idp_remote_ids', meta, sql.Column('idp_id', sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE')), sql.Column('remote_id', 
sql.String(255), primary_key=True), mysql_engine='InnoDB', mysql_charset='utf8') remote_id_table.create(migrate_engine, checkfirst=True) keystone-9.0.0/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py0000664000567000056710000000600612701407102031302 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sql from keystone.common.sql import migration_helpers def upgrade(migrate_engine): try: extension_version = migration_helpers.get_db_version( extension='oauth1', engine=migrate_engine) except Exception: extension_version = 0 # This migration corresponds to oauth extension migration 5. Only # update if it has not been run. if extension_version >= 5: return # Upgrade operations go here. 
Don't create your own engine; bind # migrate_engine to your metadata meta = sql.MetaData() meta.bind = migrate_engine consumer_table = sql.Table( 'consumer', meta, sql.Column('id', sql.String(64), primary_key=True, nullable=False), sql.Column('description', sql.String(64), nullable=True), sql.Column('secret', sql.String(64), nullable=False), sql.Column('extra', sql.Text(), nullable=False)) consumer_table.create(migrate_engine, checkfirst=True) request_token_table = sql.Table( 'request_token', meta, sql.Column('id', sql.String(64), primary_key=True, nullable=False), sql.Column('request_secret', sql.String(64), nullable=False), sql.Column('verifier', sql.String(64), nullable=True), sql.Column('authorizing_user_id', sql.String(64), nullable=True), sql.Column('requested_project_id', sql.String(64), nullable=False), sql.Column('role_ids', sql.Text(), nullable=True), sql.Column('consumer_id', sql.String(64), sql.ForeignKey('consumer.id'), nullable=False, index=True), sql.Column('expires_at', sql.String(64), nullable=True)) request_token_table.create(migrate_engine, checkfirst=True) access_token_table = sql.Table( 'access_token', meta, sql.Column('id', sql.String(64), primary_key=True, nullable=False), sql.Column('access_secret', sql.String(64), nullable=False), sql.Column('authorizing_user_id', sql.String(64), nullable=False, index=True), sql.Column('project_id', sql.String(64), nullable=False), sql.Column('role_ids', sql.Text(), nullable=False), sql.Column('consumer_id', sql.String(64), sql.ForeignKey('consumer.id'), nullable=False, index=True), sql.Column('expires_at', sql.String(64), nullable=True)) access_token_table.create(migrate_engine, checkfirst=True) keystone-9.0.0/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py0000664000567000056710000000316212701407102034773 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sql def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine user = sql.Table('user', meta, autoload=True) local_user = sql.Table( 'local_user', meta, sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column('user_id', sql.String(64), sql.ForeignKey(user.c.id, ondelete='CASCADE'), nullable=False, unique=True), sql.Column('domain_id', sql.String(64), nullable=False), sql.Column('name', sql.String(255), nullable=False), sql.UniqueConstraint('domain_id', 'name')) local_user.create(migrate_engine, checkfirst=True) password = sql.Table( 'password', meta, sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column('local_user_id', sql.Integer, sql.ForeignKey(local_user.c.id, ondelete='CASCADE'), nullable=False), sql.Column('password', sql.String(128), nullable=False)) password.create(migrate_engine, checkfirst=True) keystone-9.0.0/keystone/common/sql/migrate_repo/versions/__init__.py0000664000567000056710000000000012701407102027070 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/sql/migrate_repo/versions/071_placeholder.py0000664000567000056710000000136412701407102030220 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Kilo backports. Do not use this number for new # Liberty work. New Liberty work starts after all the placeholders. def upgrade(migrate_engine): pass ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000keystone-9.0.0/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.pykeystone-9.0.0/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_ta0000664000567000056710000000557212701407102035437 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sql def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine # You can specify primary keys when creating tables, however adding # auto-increment integer primary keys for existing tables is not # cross-engine compatibility supported. 
Thus, the approach is to: # (1) create a new revocation_event table with an int pkey, # (2) migrate data from the old table to the new table, # (3) delete the old revocation_event table # (4) rename the new revocation_event table revocation_table = sql.Table('revocation_event', meta, autoload=True) revocation_table_new = sql.Table( 'revocation_event_new', meta, sql.Column('id', sql.Integer, primary_key=True), sql.Column('domain_id', sql.String(64)), sql.Column('project_id', sql.String(64)), sql.Column('user_id', sql.String(64)), sql.Column('role_id', sql.String(64)), sql.Column('trust_id', sql.String(64)), sql.Column('consumer_id', sql.String(64)), sql.Column('access_token_id', sql.String(64)), sql.Column('issued_before', sql.DateTime(), nullable=False), sql.Column('expires_at', sql.DateTime()), sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False), sql.Column('audit_id', sql.String(32), nullable=True), sql.Column('audit_chain_id', sql.String(32), nullable=True)) revocation_table_new.create(migrate_engine, checkfirst=True) revocation_table_new.insert().from_select(['domain_id', 'project_id', 'user_id', 'role_id', 'trust_id', 'consumer_id', 'access_token_id', 'issued_before', 'expires_at', 'revoked_at', 'audit_id', 'audit_chain_id'], revocation_table.select()) revocation_table.drop() revocation_table_new.rename('revocation_event') keystone-9.0.0/keystone/common/sql/migrate_repo/versions/077_placeholder.py0000664000567000056710000000136512701407102030227 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Liberty backports. Do not use this number for new # Mitaka work. New Mitaka work starts after all the placeholders. def upgrade(migrate_engine): pass keystone-9.0.0/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py0000664000567000056710000001132512701407102033515 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import sqlalchemy as sql from keystone.common.sql import migration_helpers _PROJECT_TABLE_NAME = 'project' _DOMAIN_TABLE_NAME = 'domain' _PARENT_ID_COLUMN_NAME = 'parent_id' _DOMAIN_ID_COLUMN_NAME = 'domain_id' # Above the driver level, the domain_id of a project acting as a domain is # None. However, in order to enable sql integrity constraints to still operate # on this column, we create a special "root of all domains" row, with an ID of # NULL_DOMAIN_ID, which all projects acting as a domain reference in their # domain_id attribute. This special row, as well as NULL_DOMAIN_ID, are never # exposed outside of sql driver layer. 
NULL_DOMAIN_ID = '<>' def list_existing_project_constraints(project_table, domain_table): constraints = [{'table': project_table, 'fk_column': _PARENT_ID_COLUMN_NAME, 'ref_column': project_table.c.id}, {'table': project_table, 'fk_column': _DOMAIN_ID_COLUMN_NAME, 'ref_column': domain_table.c.id}] return constraints def list_new_project_constraints(project_table): constraints = [{'table': project_table, 'fk_column': _PARENT_ID_COLUMN_NAME, 'ref_column': project_table.c.id}, {'table': project_table, 'fk_column': _DOMAIN_ID_COLUMN_NAME, 'ref_column': project_table.c.id}] return constraints def upgrade(migrate_engine): def _project_from_domain(domain): # Creates a project dict with is_domain=True from the provided # domain. description = None extra = {} if domain.extra is not None: # 'description' property is an extra attribute in domains but a # first class attribute in projects extra = json.loads(domain.extra) description = extra.pop('description', None) return { 'id': domain.id, 'name': domain.name, 'enabled': domain.enabled, 'description': description, 'domain_id': NULL_DOMAIN_ID, 'is_domain': True, 'parent_id': None, 'extra': json.dumps(extra) } meta = sql.MetaData() meta.bind = migrate_engine session = sql.orm.sessionmaker(bind=migrate_engine)() project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True) domain_table = sql.Table(_DOMAIN_TABLE_NAME, meta, autoload=True) # NOTE(htruta): Remove the parent_id constraint during the migration # because for every root project inside this domain, we will set # the project domain_id to be its parent_id. We re-enable the constraint # in the end of this method. We also remove the domain_id constraint, # while be recreated a FK to the project_id at the end. migration_helpers.remove_constraints( list_existing_project_constraints(project_table, domain_table)) # For each domain, create a project acting as a domain. We ignore the # "root of all domains" row, since we already have one of these in the # project table. 
domains = list(domain_table.select().execute()) for domain in domains: if domain.id == NULL_DOMAIN_ID: continue is_domain_project = _project_from_domain(domain) new_entry = project_table.insert().values(**is_domain_project) session.execute(new_entry) session.commit() # For each project, that has no parent (i.e. a top level project), update # it's parent_id to point at the project acting as its domain. We ignore # the "root of all domains" row, since its parent_id must always be None. projects = list(project_table.select().execute()) for project in projects: if (project.parent_id is not None or project.is_domain or project.id == NULL_DOMAIN_ID): continue values = {'parent_id': project.domain_id} update = project_table.update().where( project_table.c.id == project.id).values(values) session.execute(update) session.commit() migration_helpers.add_constraints( list_new_project_constraints(project_table)) session.close() keystone-9.0.0/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py0000664000567000056710000000331212701407102034232 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import migrate import sqlalchemy as sql ROLE_TABLE = 'role' IMPLIED_ROLE_TABLE = 'implied_role' def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine role = sql.Table(ROLE_TABLE, meta, autoload=True) implied_role = sql.Table(IMPLIED_ROLE_TABLE, meta, autoload=True) fkeys = [ {'columns': [implied_role.c.prior_role_id], 'references': [role.c.id]}, {'columns': [implied_role.c.implied_role_id], 'references': [role.c.id]}, ] # NOTE(stevemar): We need to divide these into two separate loops otherwise # they may clobber each other and only end up with one foreign key. for fkey in fkeys: migrate.ForeignKeyConstraint(columns=fkey['columns'], refcolumns=fkey['references'], name=fkey.get('name')).drop() for fkey in fkeys: migrate.ForeignKeyConstraint(columns=fkey['columns'], refcolumns=fkey['references'], name=fkey.get('name'), ondelete="CASCADE").create() keystone-9.0.0/keystone/common/sql/migrate_repo/versions/067_kilo.py0000664000567000056710000003123612701407102026702 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import migrate from oslo_log import log import sqlalchemy as sql from keystone.assignment.backends import sql as assignment_sql from keystone.common import sql as ks_sql from keystone.identity.mapping_backends import mapping as mapping_backend LOG = log.getLogger(__name__) def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine if migrate_engine.name == 'mysql': # In Folsom we explicitly converted migrate_version to UTF8. migrate_engine.execute( 'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8') # Set default DB charset to UTF8. migrate_engine.execute( 'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' % migrate_engine.url.database) credential = sql.Table( 'credential', meta, sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('user_id', sql.String(length=64), nullable=False), sql.Column('project_id', sql.String(length=64)), sql.Column('blob', ks_sql.JsonBlob, nullable=False), sql.Column('type', sql.String(length=255), nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), mysql_engine='InnoDB', mysql_charset='utf8') domain = sql.Table( 'domain', meta, sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('name', sql.String(length=64), nullable=False), sql.Column('enabled', sql.Boolean, default=True, nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), mysql_engine='InnoDB', mysql_charset='utf8') endpoint = sql.Table( 'endpoint', meta, sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('legacy_endpoint_id', sql.String(length=64)), sql.Column('interface', sql.String(length=8), nullable=False), sql.Column('service_id', sql.String(length=64), nullable=False), sql.Column('url', sql.Text, nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), sql.Column('enabled', sql.Boolean, nullable=False, default=True, server_default='1'), sql.Column('region_id', sql.String(length=255), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8') group = sql.Table( 'group', meta, 
sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('domain_id', sql.String(length=64), nullable=False), sql.Column('name', sql.String(length=64), nullable=False), sql.Column('description', sql.Text), sql.Column('extra', ks_sql.JsonBlob.impl), mysql_engine='InnoDB', mysql_charset='utf8') policy = sql.Table( 'policy', meta, sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('type', sql.String(length=255), nullable=False), sql.Column('blob', ks_sql.JsonBlob, nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), mysql_engine='InnoDB', mysql_charset='utf8') project = sql.Table( 'project', meta, sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('name', sql.String(length=64), nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), sql.Column('description', sql.Text), sql.Column('enabled', sql.Boolean), sql.Column('domain_id', sql.String(length=64), nullable=False), sql.Column('parent_id', sql.String(64), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8') role = sql.Table( 'role', meta, sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('name', sql.String(length=255), nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), mysql_engine='InnoDB', mysql_charset='utf8') service = sql.Table( 'service', meta, sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('type', sql.String(length=255)), sql.Column('enabled', sql.Boolean, nullable=False, default=True, server_default='1'), sql.Column('extra', ks_sql.JsonBlob.impl), mysql_engine='InnoDB', mysql_charset='utf8') token = sql.Table( 'token', meta, sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('expires', sql.DateTime, default=None), sql.Column('extra', ks_sql.JsonBlob.impl), sql.Column('valid', sql.Boolean, default=True, nullable=False), sql.Column('trust_id', sql.String(length=64)), sql.Column('user_id', sql.String(length=64)), mysql_engine='InnoDB', mysql_charset='utf8') trust = 
sql.Table( 'trust', meta, sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('trustor_user_id', sql.String(length=64), nullable=False), sql.Column('trustee_user_id', sql.String(length=64), nullable=False), sql.Column('project_id', sql.String(length=64)), sql.Column('impersonation', sql.Boolean, nullable=False), sql.Column('deleted_at', sql.DateTime), sql.Column('expires_at', sql.DateTime), sql.Column('remaining_uses', sql.Integer, nullable=True), sql.Column('extra', ks_sql.JsonBlob.impl), mysql_engine='InnoDB', mysql_charset='utf8') trust_role = sql.Table( 'trust_role', meta, sql.Column('trust_id', sql.String(length=64), primary_key=True, nullable=False), sql.Column('role_id', sql.String(length=64), primary_key=True, nullable=False), mysql_engine='InnoDB', mysql_charset='utf8') user = sql.Table( 'user', meta, sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('name', sql.String(length=255), nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), sql.Column('password', sql.String(length=128)), sql.Column('enabled', sql.Boolean), sql.Column('domain_id', sql.String(length=64), nullable=False), sql.Column('default_project_id', sql.String(length=64)), mysql_engine='InnoDB', mysql_charset='utf8') user_group_membership = sql.Table( 'user_group_membership', meta, sql.Column('user_id', sql.String(length=64), primary_key=True), sql.Column('group_id', sql.String(length=64), primary_key=True), mysql_engine='InnoDB', mysql_charset='utf8') region = sql.Table( 'region', meta, sql.Column('id', sql.String(255), primary_key=True), sql.Column('description', sql.String(255), nullable=False), sql.Column('parent_region_id', sql.String(255), nullable=True), sql.Column('extra', sql.Text()), mysql_engine='InnoDB', mysql_charset='utf8') assignment = sql.Table( 'assignment', meta, sql.Column('type', sql.Enum( assignment_sql.AssignmentType.USER_PROJECT, assignment_sql.AssignmentType.GROUP_PROJECT, assignment_sql.AssignmentType.USER_DOMAIN, 
assignment_sql.AssignmentType.GROUP_DOMAIN, name='type'), nullable=False), sql.Column('actor_id', sql.String(64), nullable=False), sql.Column('target_id', sql.String(64), nullable=False), sql.Column('role_id', sql.String(64), nullable=False), sql.Column('inherited', sql.Boolean, default=False, nullable=False), sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id'), mysql_engine='InnoDB', mysql_charset='utf8') mapping = sql.Table( 'id_mapping', meta, sql.Column('public_id', sql.String(64), primary_key=True), sql.Column('domain_id', sql.String(64), nullable=False), sql.Column('local_id', sql.String(64), nullable=False), sql.Column('entity_type', sql.Enum( mapping_backend.EntityType.USER, mapping_backend.EntityType.GROUP, name='entity_type'), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8') domain_config_whitelist = sql.Table( 'whitelisted_config', meta, sql.Column('domain_id', sql.String(64), primary_key=True), sql.Column('group', sql.String(255), primary_key=True), sql.Column('option', sql.String(255), primary_key=True), sql.Column('value', ks_sql.JsonBlob.impl, nullable=False), mysql_engine='InnoDB', mysql_charset='utf8') domain_config_sensitive = sql.Table( 'sensitive_config', meta, sql.Column('domain_id', sql.String(64), primary_key=True), sql.Column('group', sql.String(255), primary_key=True), sql.Column('option', sql.String(255), primary_key=True), sql.Column('value', ks_sql.JsonBlob.impl, nullable=False), mysql_engine='InnoDB', mysql_charset='utf8') # create all tables tables = [credential, domain, endpoint, group, policy, project, role, service, token, trust, trust_role, user, user_group_membership, region, assignment, mapping, domain_config_whitelist, domain_config_sensitive] for table in tables: try: table.create() except Exception: LOG.exception('Exception while creating table: %r', table) raise # Unique Constraints migrate.UniqueConstraint(user.c.domain_id, user.c.name, name='ixu_user_name_domain_id').create() 
migrate.UniqueConstraint(group.c.domain_id, group.c.name, name='ixu_group_name_domain_id').create() migrate.UniqueConstraint(role.c.name, name='ixu_role_name').create() migrate.UniqueConstraint(project.c.domain_id, project.c.name, name='ixu_project_name_domain_id').create() migrate.UniqueConstraint(domain.c.name, name='ixu_domain_name').create() migrate.UniqueConstraint(mapping.c.domain_id, mapping.c.local_id, mapping.c.entity_type, name='domain_id').create() # Indexes sql.Index('ix_token_expires', token.c.expires).create() sql.Index('ix_token_expires_valid', token.c.expires, token.c.valid).create() sql.Index('ix_actor_id', assignment.c.actor_id).create() sql.Index('ix_token_user_id', token.c.user_id).create() sql.Index('ix_token_trust_id', token.c.trust_id).create() # NOTE(stevemar): The two indexes below were named 'service_id' and # 'group_id' in 050_fk_consistent_indexes.py, and need to be preserved sql.Index('service_id', endpoint.c.service_id).create() sql.Index('group_id', user_group_membership.c.group_id).create() fkeys = [ {'columns': [endpoint.c.service_id], 'references': [service.c.id]}, {'columns': [user_group_membership.c.group_id], 'references': [group.c.id], 'name': 'fk_user_group_membership_group_id'}, {'columns': [user_group_membership.c.user_id], 'references':[user.c.id], 'name': 'fk_user_group_membership_user_id'}, {'columns': [project.c.domain_id], 'references': [domain.c.id], 'name': 'fk_project_domain_id'}, {'columns': [endpoint.c.region_id], 'references': [region.c.id], 'name': 'fk_endpoint_region_id'}, {'columns': [project.c.parent_id], 'references': [project.c.id], 'name': 'project_parent_id_fkey'}, ] if migrate_engine.name == 'sqlite': # NOTE(stevemar): We need to keep this FK constraint due to 073, but # only for sqlite, once we collapse 073 we can remove this constraint fkeys.append( {'columns': [assignment.c.role_id], 'references': [role.c.id], 'name': 'fk_assignment_role_id'}) for fkey in fkeys: 
migrate.ForeignKeyConstraint(columns=fkey['columns'], refcolumns=fkey['references'], name=fkey.get('name')).create() ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000keystone-9.0.0/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.pykeystone-9.0.0/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password0000664000567000056710000000517412701407102035413 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import migrate import sqlalchemy as sql from sqlalchemy import func def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine user_table = sql.Table('user', meta, autoload=True) local_user_table = sql.Table('local_user', meta, autoload=True) password_table = sql.Table('password', meta, autoload=True) # migrate data to local_user table local_user_values = [] for row in user_table.select().execute(): # skip the row that already exists in `local_user`, this could # happen if run into a partially-migrated table due to the # bug #1549705. 
filter_by = local_user_table.c.user_id == row['id'] user_count = sql.select([func.count()]).select_from( local_user_table).where(filter_by).execute().fetchone()[0] if user_count == 0: local_user_values.append({'user_id': row['id'], 'domain_id': row['domain_id'], 'name': row['name']}) if local_user_values: local_user_table.insert().values(local_user_values).execute() # migrate data to password table sel = ( sql.select([user_table, local_user_table], use_labels=True) .select_from(user_table.join(local_user_table, user_table.c.id == local_user_table.c.user_id)) ) user_rows = sel.execute() password_values = [] for row in user_rows: if row['user_password']: password_values.append({'local_user_id': row['local_user_id'], 'password': row['user_password']}) if password_values: password_table.insert().values(password_values).execute() # remove domain_id and name unique constraint if migrate_engine.name != 'sqlite': migrate.UniqueConstraint(user_table.c.domain_id, user_table.c.name, name='ixu_user_name_domain_id').drop() # drop user columns user_table.c.domain_id.drop() user_table.c.name.drop() user_table.c.password.drop() keystone-9.0.0/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py0000664000567000056710000000173412701407102032242 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy as sql _PROJECT_TABLE_NAME = 'project' _IS_DOMAIN_COLUMN_NAME = 'is_domain' def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True) is_domain = sql.Column(_IS_DOMAIN_COLUMN_NAME, sql.Boolean, nullable=False, server_default='0', default=False) project_table.create_column(is_domain) keystone-9.0.0/keystone/common/sql/migrate_repo/versions/068_placeholder.py0000664000567000056710000000136412701407102030226 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Kilo backports. Do not use this number for new # Liberty work. New Liberty work starts after all the placeholders. def upgrade(migrate_engine): pass keystone-9.0.0/keystone/common/sql/migrate_repo/versions/080_placeholder.py0000664000567000056710000000136512701407102030221 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This is a placeholder for Liberty backports. Do not use this number for new # Mitaka work. New Mitaka work starts after all the placeholders. def upgrade(migrate_engine): pass keystone-9.0.0/keystone/common/sql/migrate_repo/versions/078_placeholder.py0000664000567000056710000000136512701407102030230 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Liberty backports. Do not use this number for new # Mitaka work. New Mitaka work starts after all the placeholders. def upgrade(migrate_engine): pass keystone-9.0.0/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py0000664000567000056710000000347612701407102033204 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import migrate import sqlalchemy as sql _ROLE_TABLE_NAME = 'role' _ROLE_NAME_COLUMN_NAME = 'name' def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine role_table = sql.Table(_ROLE_TABLE_NAME, meta, autoload=True) # NOTE(morganfainberg): the `role_name` unique constraint is not # guaranteed to be named 'ixu_role_name', so we need to search for the # correct constraint that only affects role_table.c.name and drop # that constraint. # # This is an idempotent change that reflects the fix to migration # 88 if the role_name unique constraint was not named consistently and # someone manually fixed the migrations / db without dropping the # old constraint. to_drop = None if migrate_engine.name == 'mysql': for c in role_table.indexes: if (c.unique and len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns): to_drop = c break else: for c in role_table.constraints: if len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns: to_drop = c break if to_drop is not None: migrate.UniqueConstraint(role_table.c.name, name=to_drop.name).drop() keystone-9.0.0/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py0000664000567000056710000000201212701407102034405 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from migrate import UniqueConstraint from sqlalchemy import MetaData, Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) trusts = Table('trust', meta, autoload=True) UniqueConstraint('trustor_user_id', 'trustee_user_id', 'project_id', 'impersonation', 'expires_at', table=trusts, name='duplicate_trust_constraint').create() keystone-9.0.0/keystone/common/sql/migrate_repo/versions/072_placeholder.py0000664000567000056710000000136412701407102030221 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Kilo backports. Do not use this number for new # Liberty work. New Liberty work starts after all the placeholders. def upgrade(migrate_engine): pass keystone-9.0.0/keystone/common/sql/migrate_repo/versions/069_placeholder.py0000664000567000056710000000136412701407102030227 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Kilo backports. 
Do not use this number for new # Liberty work. New Liberty work starts after all the placeholders. def upgrade(migrate_engine): pass keystone-9.0.0/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py0000664000567000056710000001166312701407102034224 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import migrate import sqlalchemy as sql from sqlalchemy.orm import sessionmaker from keystone.assignment.backends import sql as assignment_sql def upgrade(migrate_engine): """Inserts inherited column to assignment table PK constraints. For non-SQLite databases, it changes the constraint in the existing table. For SQLite, since changing constraints is not supported, it recreates the assignment table with the new PK constraint and migrates the existing data. 
""" ASSIGNMENT_TABLE_NAME = 'assignment' metadata = sql.MetaData() metadata.bind = migrate_engine # Retrieve the existing assignment table assignment_table = sql.Table(ASSIGNMENT_TABLE_NAME, metadata, autoload=True) if migrate_engine.name == 'sqlite': ACTOR_ID_INDEX_NAME = 'ix_actor_id' TMP_ASSIGNMENT_TABLE_NAME = 'tmp_assignment' # Define the new assignment table with a temporary name new_assignment_table = sql.Table( TMP_ASSIGNMENT_TABLE_NAME, metadata, sql.Column('type', sql.Enum( assignment_sql.AssignmentType.USER_PROJECT, assignment_sql.AssignmentType.GROUP_PROJECT, assignment_sql.AssignmentType.USER_DOMAIN, assignment_sql.AssignmentType.GROUP_DOMAIN, name='type'), nullable=False), sql.Column('actor_id', sql.String(64), nullable=False), sql.Column('target_id', sql.String(64), nullable=False), sql.Column('role_id', sql.String(64), sql.ForeignKey('role.id'), nullable=False), sql.Column('inherited', sql.Boolean, default=False, nullable=False), sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id', 'inherited'), mysql_engine='InnoDB', mysql_charset='utf8') # Create the new assignment table new_assignment_table.create(migrate_engine, checkfirst=True) # Change the index from the existing assignment table to the new one sql.Index(ACTOR_ID_INDEX_NAME, assignment_table.c.actor_id).drop() sql.Index(ACTOR_ID_INDEX_NAME, new_assignment_table.c.actor_id).create() # Instantiate session maker = sessionmaker(bind=migrate_engine) session = maker() # Migrate existing data insert = new_assignment_table.insert().from_select( assignment_table.c, select=session.query(assignment_table)) session.execute(insert) session.commit() # Drop the existing assignment table, in favor of the new one assignment_table.deregister() assignment_table.drop() # Finally, rename the new table to the original assignment table name new_assignment_table.rename(ASSIGNMENT_TABLE_NAME) elif migrate_engine.name == 'ibm_db_sa': # Recreate the existing constraint, marking the inherited column as PK 
# for DB2. # This is a workaround to the general case in the else statement below. # Due to a bug in the DB2 sqlalchemy dialect, Column.alter() actually # creates a primary key over only the "inherited" column. This is wrong # because the primary key for the table actually covers other columns # too, not just the "inherited" column. Since the primary key already # exists for the table after the Column.alter() call, it causes the # next line to fail with an error that the primary key already exists. # The workaround here skips doing the Column.alter(). This causes a # warning message since the metadata is out of sync. We can remove this # workaround once the DB2 sqlalchemy dialect is fixed. # DB2 Issue: https://code.google.com/p/ibm-db/issues/detail?id=173 migrate.PrimaryKeyConstraint(table=assignment_table).drop() migrate.PrimaryKeyConstraint( assignment_table.c.type, assignment_table.c.actor_id, assignment_table.c.target_id, assignment_table.c.role_id, assignment_table.c.inherited).create() else: # Recreate the existing constraint, marking the inherited column as PK migrate.PrimaryKeyConstraint(table=assignment_table).drop() assignment_table.c.inherited.alter(primary_key=True) migrate.PrimaryKeyConstraint(table=assignment_table).create() keystone-9.0.0/keystone/common/sql/migrate_repo/versions/079_placeholder.py0000664000567000056710000000136512701407102030231 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This is a placeholder for Liberty backports. Do not use this number for new # Mitaka work. New Mitaka work starts after all the placeholders. def upgrade(migrate_engine): pass keystone-9.0.0/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py0000664000567000056710000000354212701407102033115 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sql from keystone.common.sql import migration_helpers def upgrade(migrate_engine): try: extension_version = migration_helpers.get_db_version( extension='endpoint_policy', engine=migrate_engine) except Exception: extension_version = 0 # This migration corresponds to endpoint_policy extension migration 1. Only # update if it has not been run. if extension_version >= 1: return # Upgrade operations go here. 
Don't create your own engine; bind # migrate_engine to your metadata meta = sql.MetaData() meta.bind = migrate_engine endpoint_policy_table = sql.Table( 'policy_association', meta, sql.Column('id', sql.String(64), primary_key=True), sql.Column('policy_id', sql.String(64), nullable=False), sql.Column('endpoint_id', sql.String(64), nullable=True), sql.Column('service_id', sql.String(64), nullable=True), sql.Column('region_id', sql.String(64), nullable=True), sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'), mysql_engine='InnoDB', mysql_charset='utf8') endpoint_policy_table.create(migrate_engine, checkfirst=True) keystone-9.0.0/keystone/common/sql/migrate_repo/versions/076_placeholder.py0000664000567000056710000000136512701407102030226 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a placeholder for Liberty backports. Do not use this number for new # Mitaka work. New Mitaka work starts after all the placeholders. def upgrade(migrate_engine): pass keystone-9.0.0/keystone/common/sql/migrate_repo/versions/075_confirm_config_registration.py0000664000567000056710000000204012701407102033506 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sql REGISTRATION_TABLE = 'config_register' def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine registration_table = sql.Table( REGISTRATION_TABLE, meta, sql.Column('type', sql.String(64), primary_key=True), sql.Column('domain_id', sql.String(64), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8') registration_table.create(migrate_engine, checkfirst=True) keystone-9.0.0/keystone/common/sql/migrate_repo/manage.py0000664000567000056710000000016412701407102024724 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python from migrate.versioning.shell import main if __name__ == '__main__': main(debug='False') keystone-9.0.0/keystone/common/sql/migrate_repo/__init__.py0000664000567000056710000000000012701407102025220 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/sql/migrate_repo/migrate.cfg0000664000567000056710000000231412701407102025232 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=keystone # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. 
version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] # When creating new change scripts, Migrate will stamp the new script with # a version number. By default this is latest_version + 1. You can set this # to 'true' to tell Migrate to use the UTC timestamp instead. use_timestamp_numbering=False keystone-9.0.0/keystone/common/sql/__init__.py0000664000567000056710000000117112701407102022555 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common.sql.core import * # noqa keystone-9.0.0/keystone/common/sql/core.py0000664000567000056710000003470112701407105021756 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQL backends for the various services. Before using this module, call initialize(). This has to be done before CONF() because it sets up configuration options. """ import functools from oslo_config import cfg from oslo_db import exception as db_exception from oslo_db import options as db_options from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import models from oslo_log import log from oslo_serialization import jsonutils import six import sqlalchemy as sql from sqlalchemy.ext import declarative from sqlalchemy.orm.attributes import flag_modified, InstrumentedAttribute from sqlalchemy import types as sql_types from keystone.common import driver_hints from keystone.common import utils from keystone import exception from keystone.i18n import _ CONF = cfg.CONF LOG = log.getLogger(__name__) ModelBase = declarative.declarative_base() # For exporting to other modules Column = sql.Column Index = sql.Index String = sql.String Integer = sql.Integer Enum = sql.Enum ForeignKey = sql.ForeignKey DateTime = sql.DateTime IntegrityError = sql.exc.IntegrityError DBDuplicateEntry = db_exception.DBDuplicateEntry OperationalError = sql.exc.OperationalError NotFound = sql.orm.exc.NoResultFound Boolean = sql.Boolean Text = sql.Text UniqueConstraint = sql.UniqueConstraint PrimaryKeyConstraint = sql.PrimaryKeyConstraint joinedload = sql.orm.joinedload # Suppress flake8's unused import warning for flag_modified: flag_modified = flag_modified def initialize(): """Initialize the module.""" db_options.set_defaults( CONF, connection="sqlite:///keystone.db") def 
initialize_decorator(init): """Ensure that the length of string field do not exceed the limit. This decorator check the initialize arguments, to make sure the length of string field do not exceed the length limit, or raise a 'StringLengthExceeded' exception. Use decorator instead of inheritance, because the metaclass will check the __tablename__, primary key columns, etc. at the class definition. """ def initialize(self, *args, **kwargs): cls = type(self) for k, v in kwargs.items(): if hasattr(cls, k): attr = getattr(cls, k) if isinstance(attr, InstrumentedAttribute): column = attr.property.columns[0] if isinstance(column.type, String): if not isinstance(v, six.text_type): v = six.text_type(v) if column.type.length and column.type.length < len(v): raise exception.StringLengthExceeded( string=v, type=k, length=column.type.length) init(self, *args, **kwargs) return initialize ModelBase.__init__ = initialize_decorator(ModelBase.__init__) # Special Fields class JsonBlob(sql_types.TypeDecorator): impl = sql.Text def process_bind_param(self, value, dialect): return jsonutils.dumps(value) def process_result_value(self, value, dialect): return jsonutils.loads(value) class DictBase(models.ModelBase): attributes = [] @classmethod def from_dict(cls, d): new_d = d.copy() new_d['extra'] = {k: new_d.pop(k) for k in six.iterkeys(d) if k not in cls.attributes and k != 'extra'} return cls(**new_d) def to_dict(self, include_extra_dict=False): """Returns the model's attributes as a dictionary. If include_extra_dict is True, 'extra' attributes are literally included in the resulting dictionary twice, for backwards-compatibility with a broken implementation. 
""" d = self.extra.copy() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) if include_extra_dict: d['extra'] = self.extra.copy() return d def __getitem__(self, key): if key in self.extra: return self.extra[key] return getattr(self, key) class ModelDictMixin(object): @classmethod def from_dict(cls, d): """Returns a model instance from a dictionary.""" return cls(**d) def to_dict(self): """Returns the model's attributes as a dictionary.""" names = (column.name for column in self.__table__.columns) return {name: getattr(self, name) for name in names} _main_context_manager = None def _get_main_context_manager(): global _main_context_manager if not _main_context_manager: _main_context_manager = enginefacade.transaction_context() return _main_context_manager def cleanup(): global _main_context_manager _main_context_manager = None _CONTEXT = None def _get_context(): global _CONTEXT if _CONTEXT is None: # NOTE(dims): Delay the `threading.local` import to allow for # eventlet/gevent monkeypatching to happen import threading _CONTEXT = threading.local() return _CONTEXT def session_for_read(): return _get_main_context_manager().reader.using(_get_context()) def session_for_write(): return _get_main_context_manager().writer.using(_get_context()) def truncated(f): return driver_hints.truncated(f) class _WontMatch(Exception): """Raised to indicate that the filter won't match. This is raised to short-circuit the computation of the filter as soon as it's discovered that the filter requested isn't going to match anything. A filter isn't going to match anything if the value is too long for the field, for example. """ @classmethod def check(cls, value, col_attr): """Check if the value can match given the column attributes. Raises this class if the value provided can't match any value in the column in the table given the column's attributes. 
For example, if the column is a string and the value is longer than the column then it won't match any value in the column in the table. """ col = col_attr.property.columns[0] if isinstance(col.type, sql.types.Boolean): # The column is a Boolean, we should have already validated input. return if not col.type.length: # The column doesn't have a length so can't validate anymore. return if len(value) > col.type.length: raise cls() # Otherwise the value could match a value in the column. def _filter(model, query, hints): """Applies filtering to a query. :param model: the table model in question :param query: query to apply filters to :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns query: query, updated with any filters satisfied """ def inexact_filter(model, query, filter_, satisfied_filters): """Applies an inexact filter to a query. :param model: the table model in question :param query: query to apply filters to :param dict filter_: describes this filter :param list satisfied_filters: filter_ will be added if it is satisfied. :returns query: query updated to add any inexact filters we could satisfy """ column_attr = getattr(model, filter_['name']) # TODO(henry-nash): Sqlalchemy 0.7 defaults to case insensitivity # so once we find a way of changing that (maybe on a call-by-call # basis), we can add support for the case sensitive versions of # the filters below. For now, these case sensitive versions will # be handled at the controller level. 
if filter_['case_sensitive']: return query if filter_['comparator'] == 'contains': _WontMatch.check(filter_['value'], column_attr) query_term = column_attr.ilike('%%%s%%' % filter_['value']) elif filter_['comparator'] == 'startswith': _WontMatch.check(filter_['value'], column_attr) query_term = column_attr.ilike('%s%%' % filter_['value']) elif filter_['comparator'] == 'endswith': _WontMatch.check(filter_['value'], column_attr) query_term = column_attr.ilike('%%%s' % filter_['value']) else: # It's a filter we don't understand, so let the caller # work out if they need to do something with it. return query satisfied_filters.append(filter_) return query.filter(query_term) def exact_filter(model, query, filter_, satisfied_filters): """Applies an exact filter to a query. :param model: the table model in question :param query: query to apply filters to :param dict filter_: describes this filter :param list satisfied_filters: filter_ will be added if it is satisfied. :returns query: query updated to add any exact filters we could satisfy """ key = filter_['name'] col = getattr(model, key) if isinstance(col.property.columns[0].type, sql.types.Boolean): filter_val = utils.attr_as_boolean(filter_['value']) else: _WontMatch.check(filter_['value'], col) filter_val = filter_['value'] satisfied_filters.append(filter_) return query.filter(col == filter_val) try: satisfied_filters = [] for filter_ in hints.filters: if filter_['name'] not in model.attributes: continue if filter_['comparator'] == 'equals': query = exact_filter(model, query, filter_, satisfied_filters) else: query = inexact_filter(model, query, filter_, satisfied_filters) # Remove satisfied filters, then the caller will know remaining filters for filter_ in satisfied_filters: hints.filters.remove(filter_) return query except _WontMatch: hints.cannot_match = True return def _limit(query, hints): """Applies a limit to a query. 
:param query: query to apply filters to :param hints: contains the list of filters and limit details. :returns: updated query """ # NOTE(henry-nash): If we were to implement pagination, then we # we would expand this method to support pagination and limiting. # If we satisfied all the filters, set an upper limit if supplied if hints.limit: query = query.limit(hints.limit['limit']) return query def filter_limit_query(model, query, hints): """Applies filtering and limit to a query. :param model: table model :param query: query to apply filters to :param hints: contains the list of filters and limit details. This may be None, indicating that there are no filters or limits to be applied. If it's not None, then any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: updated query """ if hints is None: return query # First try and satisfy any filters query = _filter(model, query, hints) if hints.cannot_match: # Nothing's going to match, so don't bother with the query. return [] # NOTE(henry-nash): Any unsatisfied filters will have been left in # the hints list for the controller to handle. We can only try and # limit here if all the filters are already satisfied since, if not, # doing so might mess up the final results. If there are still # unsatisfied filters, we have to leave any limiting to the controller # as well. if not hints.filters: return _limit(query, hints) else: return query def handle_conflicts(conflict_type='object'): """Converts select sqlalchemy exceptions into HTTP 409 Conflict.""" _conflict_msg = 'Conflict %(conflict_type)s: %(details)s' def decorator(method): @functools.wraps(method) def wrapper(*args, **kwargs): try: return method(*args, **kwargs) except db_exception.DBDuplicateEntry as e: # LOG the exception for debug purposes, do not send the # exception details out with the raised Conflict exception # as it can contain raw SQL. 
LOG.debug(_conflict_msg, {'conflict_type': conflict_type, 'details': six.text_type(e)}) raise exception.Conflict(type=conflict_type, details=_('Duplicate Entry')) except db_exception.DBError as e: # TODO(blk-u): inspecting inner_exception breaks encapsulation; # oslo_db should provide exception we need. if isinstance(e.inner_exception, IntegrityError): # LOG the exception for debug purposes, do not send the # exception details out with the raised Conflict exception # as it can contain raw SQL. LOG.debug(_conflict_msg, {'conflict_type': conflict_type, 'details': six.text_type(e)}) # NOTE(morganfainberg): This is really a case where the SQL # failed to store the data. This is not something that the # user has done wrong. Example would be a ForeignKey is # missing; the code that is executed before reaching the # SQL writing to the DB should catch the issue. raise exception.UnexpectedError( _('An unexpected error occurred when trying to ' 'store %s') % conflict_type) raise return wrapper return decorator keystone-9.0.0/keystone/common/sql/migration_helpers.py0000664000567000056710000002101612701407102024531 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import sys import migrate from migrate import exceptions from oslo_config import cfg from oslo_db.sqlalchemy import migration from oslo_utils import importutils import six import sqlalchemy from keystone.common import sql from keystone import contrib from keystone import exception from keystone.i18n import _ CONF = cfg.CONF DEFAULT_EXTENSIONS = [] MIGRATED_EXTENSIONS = ['endpoint_policy', 'federation', 'oauth1', 'revoke', 'endpoint_filter' ] # Different RDBMSs use different schemes for naming the Foreign Key # Constraints. SQLAlchemy does not yet attempt to determine the name # for the constraint, and instead attempts to deduce it from the column. # This fails on MySQL. def get_constraints_names(table, column_name): fkeys = [fk.name for fk in table.constraints if (isinstance(fk, sqlalchemy.ForeignKeyConstraint) and column_name in fk.columns)] return fkeys # remove_constraints and add_constraints both accept a list of dictionaries # that contain: # {'table': a sqlalchemy table. The constraint is added to dropped from # this table. # 'fk_column': the name of a column on the above table, The constraint # is added to or dropped from this column # 'ref_column':a sqlalchemy column object. This is the reference column # for the constraint. def remove_constraints(constraints): for constraint_def in constraints: constraint_names = get_constraints_names(constraint_def['table'], constraint_def['fk_column']) for constraint_name in constraint_names: migrate.ForeignKeyConstraint( columns=[getattr(constraint_def['table'].c, constraint_def['fk_column'])], refcolumns=[constraint_def['ref_column']], name=constraint_name).drop() def add_constraints(constraints): for constraint_def in constraints: if constraint_def['table'].kwargs.get('mysql_engine') == 'MyISAM': # Don't try to create constraint when using MyISAM because it's # not supported. 
continue ref_col = constraint_def['ref_column'] ref_engine = ref_col.table.kwargs.get('mysql_engine') if ref_engine == 'MyISAM': # Don't try to create constraint when using MyISAM because it's # not supported. continue migrate.ForeignKeyConstraint( columns=[getattr(constraint_def['table'].c, constraint_def['fk_column'])], refcolumns=[constraint_def['ref_column']]).create() def rename_tables_with_constraints(renames, constraints, engine): """Renames tables with foreign key constraints. Tables are renamed after first removing constraints. The constraints are replaced after the rename is complete. This works on databases that don't support renaming tables that have constraints on them (DB2). `renames` is a dict, mapping {'to_table_name': from_table, ...} """ if engine.name != 'sqlite': # SQLite doesn't support constraints, so nothing to remove. remove_constraints(constraints) for to_table_name in renames: from_table = renames[to_table_name] from_table.rename(to_table_name) if engine != 'sqlite': add_constraints(constraints) def find_migrate_repo(package=None, repo_name='migrate_repo'): package = package or sql path = os.path.abspath(os.path.join( os.path.dirname(package.__file__), repo_name)) if os.path.isdir(path): return path raise exception.MigrationNotProvided(package.__name__, path) def _sync_common_repo(version): abs_path = find_migrate_repo() init_version = get_init_version() with sql.session_for_write() as session: engine = session.get_bind() _assert_not_schema_downgrade(version=version) migration.db_sync(engine, abs_path, version=version, init_version=init_version, sanity_check=False) def get_init_version(abs_path=None): """Get the initial version of a migrate repository :param abs_path: Absolute path to migrate repository. :return: initial version number or None, if DB is empty. """ if abs_path is None: abs_path = find_migrate_repo() repo = migrate.versioning.repository.Repository(abs_path) # Sadly, Repository has a `latest` but not an `oldest`. 
# The value is a VerNum object which needs to be converted into an int. oldest = int(min(repo.versions.versions)) if oldest < 1: return None # The initial version is one less return oldest - 1 def _assert_not_schema_downgrade(extension=None, version=None): if version is not None: try: current_ver = int(six.text_type(get_db_version(extension))) if int(version) < current_ver: raise migration.exception.DbMigrationError( _("Unable to downgrade schema")) except exceptions.DatabaseNotControlledError: # nosec # NOTE(morganfainberg): The database is not controlled, this action # cannot be a downgrade. pass def _sync_extension_repo(extension, version): if extension in MIGRATED_EXTENSIONS: raise exception.MigrationMovedFailure(extension=extension) with sql.session_for_write() as session: engine = session.get_bind() try: package_name = '.'.join((contrib.__name__, extension)) package = importutils.import_module(package_name) except ImportError: raise ImportError(_("%s extension does not exist.") % package_name) try: abs_path = find_migrate_repo(package) try: migration.db_version_control(engine, abs_path) # Register the repo with the version control API # If it already knows about the repo, it will throw # an exception that we can safely ignore except exceptions.DatabaseAlreadyControlledError: # nosec pass except exception.MigrationNotProvided as e: print(e) sys.exit(1) _assert_not_schema_downgrade(extension=extension, version=version) init_version = get_init_version(abs_path=abs_path) migration.db_sync(engine, abs_path, version=version, init_version=init_version, sanity_check=False) def sync_database_to_version(extension=None, version=None): if not extension: _sync_common_repo(version) # If version is greater than 0, it is for the common # repository only, and only that will be synchronized. 
if version is None: for default_extension in DEFAULT_EXTENSIONS: _sync_extension_repo(default_extension, version) else: _sync_extension_repo(extension, version) def get_db_version(extension=None): if not extension: with sql.session_for_write() as session: return migration.db_version(session.get_bind(), find_migrate_repo(), get_init_version()) try: package_name = '.'.join((contrib.__name__, extension)) package = importutils.import_module(package_name) except ImportError: raise ImportError(_("%s extension does not exist.") % package_name) with sql.session_for_write() as session: return migration.db_version( session.get_bind(), find_migrate_repo(package), 0) def print_db_version(extension=None): try: db_version = get_db_version(extension=extension) print(db_version) except exception.MigrationNotProvided as e: print(e) sys.exit(1) keystone-9.0.0/keystone/common/models.py0000664000567000056710000000674312701407105021517 0ustar jenkinsjenkins00000000000000# Copyright (C) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base model for keystone internal services Unless marked otherwise, all fields are strings. """ class Model(dict): """Base model class.""" def __hash__(self): return self['id'].__hash__() @property def known_keys(cls): return cls.required_keys + cls.optional_keys class Token(Model): """Token object. 
Required keys: id expires (datetime) Optional keys: user tenant metadata trust_id """ required_keys = ('id', 'expires') optional_keys = ('extra',) class Service(Model): """Service object. Required keys: id type name Optional keys: """ required_keys = ('id', 'type', 'name') optional_keys = tuple() class Endpoint(Model): """Endpoint object Required keys: id region service_id Optional keys: internalurl publicurl adminurl """ required_keys = ('id', 'region', 'service_id') optional_keys = ('internalurl', 'publicurl', 'adminurl') class User(Model): """User object. Required keys: id name domain_id Optional keys: password description email enabled (bool, default True) default_project_id """ required_keys = ('id', 'name', 'domain_id') optional_keys = ('password', 'description', 'email', 'enabled', 'default_project_id') class Group(Model): """Group object. Required keys: id name domain_id Optional keys: description """ required_keys = ('id', 'name', 'domain_id') optional_keys = ('description',) class Project(Model): """Project object. Required keys: id name domain_id Optional Keys: description enabled (bool, default True) is_domain (bool, default False) """ required_keys = ('id', 'name', 'domain_id') optional_keys = ('description', 'enabled', 'is_domain') class Role(Model): """Role object. Required keys: id name """ required_keys = ('id', 'name') optional_keys = tuple() class ImpliedRole(Model): """ImpliedRole object. Required keys: prior_role_id implied_role_id """ required_keys = ('prior_role_id', 'implied_role_id') optional_keys = tuple() class Trust(Model): """Trust object. Required keys: id trustor_user_id trustee_user_id project_id """ required_keys = ('id', 'trustor_user_id', 'trustee_user_id', 'project_id') optional_keys = ('expires_at',) class Domain(Model): """Domain object. 
Required keys: id name Optional keys: description enabled (bool, default True) """ required_keys = ('id', 'name') optional_keys = ('description', 'enabled') keystone-9.0.0/keystone/common/kvs/0000775000567000056710000000000012701407246020461 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/kvs/backends/0000775000567000056710000000000012701407246022233 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/kvs/backends/__init__.py0000664000567000056710000000000012701407102024321 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/kvs/backends/inmemdb.py0000664000567000056710000000355512701407102024217 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Keystone In-Memory Dogpile.cache backend implementation.""" import copy from dogpile.cache import api NO_VALUE = api.NO_VALUE class MemoryBackend(api.CacheBackend): """A backend that uses a plain dictionary. There is no size management, and values which are placed into the dictionary will remain until explicitly removed. Note that Dogpile's expiration of items is based on timestamps and does not remove them from the cache. 
E.g.:: from dogpile.cache import make_region region = make_region().configure( 'keystone.common.kvs.Memory' ) """ def __init__(self, arguments): self._db = {} def _isolate_value(self, value): if value is not NO_VALUE: return copy.deepcopy(value) return value def get(self, key): return self._isolate_value(self._db.get(key, NO_VALUE)) def get_multi(self, keys): return [self.get(key) for key in keys] def set(self, key, value): self._db[key] = self._isolate_value(value) def set_multi(self, mapping): for key, value in mapping.items(): self.set(key, value) def delete(self, key): self._db.pop(key, None) def delete_multi(self, keys): for key in keys: self.delete(key) keystone-9.0.0/keystone/common/kvs/backends/memcached.py0000664000567000056710000001650112701407102024505 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Keystone Memcached dogpile.cache backend implementation.""" import random as _random import time from dogpile.cache import api from dogpile.cache.backends import memcached from oslo_cache.backends import memcache_pool from oslo_config import cfg from six.moves import range from keystone import exception from keystone.i18n import _ CONF = cfg.CONF NO_VALUE = api.NO_VALUE random = _random.SystemRandom() VALID_DOGPILE_BACKENDS = dict( pylibmc=memcached.PylibmcBackend, bmemcached=memcached.BMemcachedBackend, memcached=memcached.MemcachedBackend, pooled_memcached=memcache_pool.PooledMemcachedBackend) class MemcachedLock(object): """Simple distributed lock using memcached. This is an adaptation of the lock featured at http://amix.dk/blog/post/19386 """ def __init__(self, client_fn, key, lock_timeout, max_lock_attempts): self.client_fn = client_fn self.key = "_lock" + key self.lock_timeout = lock_timeout self.max_lock_attempts = max_lock_attempts def acquire(self, wait=True): client = self.client_fn() for i in range(self.max_lock_attempts): if client.add(self.key, 1, self.lock_timeout): return True elif not wait: return False else: sleep_time = random.random() # nosec : random is not used for # crypto or security, it's just the time to delay between # retries. time.sleep(sleep_time) raise exception.UnexpectedError( _('Maximum lock attempts on %s occurred.') % self.key) def release(self): client = self.client_fn() client.delete(self.key) class MemcachedBackend(object): """Pivot point to leverage the various dogpile.cache memcached backends. To specify a specific dogpile.cache memcached backend, pass the argument `memcached_backend` set to one of the provided memcached backends (at this time `memcached`, `bmemcached`, `pylibmc` and `pooled_memcached` are valid). 
""" def __init__(self, arguments): self._key_mangler = None self.raw_no_expiry_keys = set(arguments.pop('no_expiry_keys', set())) self.no_expiry_hashed_keys = set() self.lock_timeout = arguments.pop('lock_timeout', None) self.max_lock_attempts = arguments.pop('max_lock_attempts', 15) # NOTE(morganfainberg): Remove distributed locking from the arguments # passed to the "real" backend if it exists. arguments.pop('distributed_lock', None) backend = arguments.pop('memcached_backend', None) if 'url' not in arguments: # FIXME(morganfainberg): Log deprecation warning for old-style # configuration once full dict_config style configuration for # KVS backends is supported. For now use the current memcache # section of the configuration. arguments['url'] = CONF.memcache.servers if backend is None: # NOTE(morganfainberg): Use the basic memcached backend if nothing # else is supplied. self.driver = VALID_DOGPILE_BACKENDS['memcached'](arguments) else: if backend not in VALID_DOGPILE_BACKENDS: raise ValueError( _('Backend `%(backend)s` is not a valid memcached ' 'backend. Valid backends: %(backend_list)s') % {'backend': backend, 'backend_list': ','.join(VALID_DOGPILE_BACKENDS.keys())}) else: self.driver = VALID_DOGPILE_BACKENDS[backend](arguments) def __getattr__(self, name): """Forward calls to the underlying driver.""" f = getattr(self.driver, name) setattr(self, name, f) return f def _get_set_arguments_driver_attr(self, exclude_expiry=False): # NOTE(morganfainberg): Shallow copy the .set_arguments dict to # ensure no changes cause the values to change in the instance # variable. 
set_arguments = getattr(self.driver, 'set_arguments', {}).copy() if exclude_expiry: # NOTE(morganfainberg): Explicitly strip out the 'time' key/value # from the set_arguments in the case that this key isn't meant # to expire set_arguments.pop('time', None) return set_arguments def set(self, key, value): mapping = {key: value} self.set_multi(mapping) def set_multi(self, mapping): mapping_keys = set(mapping.keys()) no_expiry_keys = mapping_keys.intersection(self.no_expiry_hashed_keys) has_expiry_keys = mapping_keys.difference(self.no_expiry_hashed_keys) if no_expiry_keys: # NOTE(morganfainberg): For keys that have expiry excluded, # bypass the backend and directly call the client. Bypass directly # to the client is required as the 'set_arguments' are applied to # all ``set`` and ``set_multi`` calls by the driver, by calling # the client directly it is possible to exclude the ``time`` # argument to the memcached server. new_mapping = {k: mapping[k] for k in no_expiry_keys} set_arguments = self._get_set_arguments_driver_attr( exclude_expiry=True) self.driver.client.set_multi(new_mapping, **set_arguments) if has_expiry_keys: new_mapping = {k: mapping[k] for k in has_expiry_keys} self.driver.set_multi(new_mapping) @classmethod def from_config_dict(cls, config_dict, prefix): prefix_len = len(prefix) return cls( {key[prefix_len:]: config_dict[key] for key in config_dict if key.startswith(prefix)}) @property def key_mangler(self): if self._key_mangler is None: self._key_mangler = self.driver.key_mangler return self._key_mangler @key_mangler.setter def key_mangler(self, key_mangler): if callable(key_mangler): self._key_mangler = key_mangler self._rehash_keys() elif key_mangler is None: # NOTE(morganfainberg): Set the hashed key map to the unhashed # list since we no longer have a key_mangler. 
self._key_mangler = None self.no_expiry_hashed_keys = self.raw_no_expiry_keys else: raise TypeError(_('`key_mangler` functions must be callable.')) def _rehash_keys(self): no_expire = set() for key in self.raw_no_expiry_keys: no_expire.add(self._key_mangler(key)) self.no_expiry_hashed_keys = no_expire def get_mutex(self, key): return MemcachedLock(lambda: self.driver.client, key, self.lock_timeout, self.max_lock_attempts) keystone-9.0.0/keystone/common/kvs/__init__.py0000664000567000056710000000217512701407102022566 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from dogpile.cache import region from keystone.common.kvs.core import * # noqa # NOTE(morganfainberg): Provided backends are registered here in the __init__ # for the kvs system. Any out-of-tree backends should be registered via the # ``backends`` option in the ``[kvs]`` section of the Keystone configuration # file. region.register_backend( 'openstack.kvs.Memory', 'keystone.common.kvs.backends.inmemdb', 'MemoryBackend') region.register_backend( 'openstack.kvs.Memcached', 'keystone.common.kvs.backends.memcached', 'MemcachedBackend') keystone-9.0.0/keystone/common/kvs/core.py0000664000567000056710000004347512701407102021767 0ustar jenkinsjenkins00000000000000# Copyright 2013 Metacloud, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import threading import time import weakref from dogpile.cache import api from dogpile.cache import proxy from dogpile.cache import region from dogpile.cache import util as dogpile_util from dogpile.core import nameregistry from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import reflection from keystone import exception from keystone.i18n import _ from keystone.i18n import _LI from keystone.i18n import _LW __all__ = ('KeyValueStore', 'KeyValueStoreLock', 'LockTimeout', 'get_key_value_store') BACKENDS_REGISTERED = False CONF = cfg.CONF KEY_VALUE_STORE_REGISTRY = weakref.WeakValueDictionary() LOCK_WINDOW = 1 LOG = log.getLogger(__name__) NO_VALUE = api.NO_VALUE def _register_backends(): # NOTE(morganfainberg): This function exists to ensure we do not try and # register the backends prior to the configuration object being fully # available. We also need to ensure we do not register a given backend # more than one time. All backends will be prefixed with openstack.kvs # as the "short" name to reference them for configuration purposes. This # function is used in addition to the pre-registered backends in the # __init__ file for the KVS system. 
    # Each entry in [kvs] backends is a full import path ending in a class
    # name; register it with dogpile under the short name
    # 'openstack.kvs.<ClassName>'.  Guarded by BACKENDS_REGISTERED so that
    # repeated calls are no-ops.
    global BACKENDS_REGISTERED
    if not BACKENDS_REGISTERED:
        prefix = 'openstack.kvs.%s'
        for backend in CONF.kvs.backends:
            module, cls = backend.rsplit('.', 1)
            backend_name = prefix % cls
            LOG.debug(('Registering Dogpile Backend %(backend_path)s as '
                       '%(backend_name)s'),
                      {'backend_path': backend,
                       'backend_name': backend_name})
            region.register_backend(backend_name, module, cls)
        BACKENDS_REGISTERED = True


def sha1_mangle_key(key):
    """Wrapper for dogpile's sha1_mangle_key.

    Taken from oslo_cache.core._sha1_mangle_key

    dogpile's sha1_mangle_key function expects an encoded string, so we
    should take steps to properly handle multiple inputs before passing
    the key through.

    :param key: key to mangle; encoded to UTF-8 first when the value
                supports ``.encode`` (non-strings are passed through as-is)
    :returns: result of dogpile's ``sha1_mangle_key`` on the key
    """
    try:
        key = key.encode('utf-8', errors='xmlcharrefreplace')
    except (UnicodeError, AttributeError):  # nosec
        # NOTE(stevemar): if encoding fails just continue anyway.
        pass
    return dogpile_util.sha1_mangle_key(key)


class LockTimeout(exception.UnexpectedError):
    # Raised when a lock context has outlived its configured lock timeout;
    # see KeyValueStore._action_with_lock and KeyValueStoreLock.expired.
    debug_message_format = _('Lock Timeout occurred for key, %(target)s')


class KeyValueStore(object):
    """Basic KVS manager object to support Keystone Key-Value-Store systems.

    This manager also supports the concept of locking a given key resource to
    allow for a guaranteed atomic transaction to the backend.
    """

    def __init__(self, kvs_region):
        # Locking is enabled by default; configure() may disable it.
        self.locking = True
        self._lock_timeout = 0
        self._region = kvs_region
        self._security_strategy = None
        self._secret_key = None
        # Registry mapping a key to its mutex; mutexes are created lazily
        # by _create_mutex on first use of a given key.
        self._lock_registry = nameregistry.NameRegistry(self._create_mutex)

    def configure(self, backing_store, key_mangler=None, proxy_list=None,
                  locking=True, **region_config_args):
        """Configure the KeyValueStore instance.
:param backing_store: dogpile.cache short name of the region backend :param key_mangler: key_mangler function :param proxy_list: list of proxy classes to apply to the region :param locking: boolean that allows disabling of locking mechanism for this instantiation :param region_config_args: key-word args passed to the dogpile.cache backend for configuration """ if self.is_configured: # NOTE(morganfainberg): It is a bad idea to reconfigure a backend, # there are a lot of pitfalls and potential memory leaks that could # occur. By far the best approach is to re-create the KVS object # with the new configuration. raise RuntimeError(_('KVS region %s is already configured. ' 'Cannot reconfigure.') % self._region.name) self.locking = locking self._lock_timeout = region_config_args.pop( 'lock_timeout', CONF.kvs.default_lock_timeout) self._configure_region(backing_store, **region_config_args) self._set_key_mangler(key_mangler) self._apply_region_proxy(proxy_list) @property def is_configured(self): return 'backend' in self._region.__dict__ def _apply_region_proxy(self, proxy_list): if isinstance(proxy_list, list): proxies = [] for item in proxy_list: if isinstance(item, str): LOG.debug('Importing class %s as KVS proxy.', item) pxy = importutils.import_class(item) else: pxy = item if issubclass(pxy, proxy.ProxyBackend): proxies.append(pxy) else: pxy_cls_name = reflection.get_class_name( pxy, fully_qualified=False) LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'), pxy_cls_name) for proxy_cls in reversed(proxies): proxy_cls_name = reflection.get_class_name( proxy_cls, fully_qualified=False) LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'), {'proxy': proxy_cls_name, 'name': self._region.name}) self._region.wrap(proxy_cls) def _assert_configured(self): if'backend' not in self._region.__dict__: raise exception.UnexpectedError(_('Key Value Store not ' 'configured: %s'), self._region.name) def _set_keymangler_on_backend(self, key_mangler): try: 
self._region.backend.key_mangler = key_mangler except Exception as e: # NOTE(morganfainberg): The setting of the key_mangler on the # backend is used to allow the backend to # calculate a hashed key value as needed. Not all backends # require the ability to calculate hashed keys. If the # backend does not support/require this feature log a # debug line and move on otherwise raise the proper exception. # Support of the feature is implied by the existence of the # 'raw_no_expiry_keys' attribute. if not hasattr(self._region.backend, 'raw_no_expiry_keys'): LOG.debug(('Non-expiring keys not supported/required by ' '%(region)s backend; unable to set ' 'key_mangler for backend: %(err)s'), {'region': self._region.name, 'err': e}) else: raise def _set_key_mangler(self, key_mangler): # Set the key_mangler that is appropriate for the given region being # configured here. The key_mangler function is called prior to storing # the value(s) in the backend. This is to help prevent collisions and # limit issues such as memcache's limited cache_key size. use_backend_key_mangler = getattr(self._region.backend, 'use_backend_key_mangler', False) if ((key_mangler is None or use_backend_key_mangler) and (self._region.backend.key_mangler is not None)): # NOTE(morganfainberg): Use the configured key_mangler as a first # choice. Second choice would be the key_mangler defined by the # backend itself. Finally, fall back to the defaults. The one # exception is if the backend defines `use_backend_key_mangler` # as True, which indicates the backend's key_mangler should be # the first choice. 
key_mangler = self._region.backend.key_mangler if CONF.kvs.enable_key_mangler: if key_mangler is not None: msg = _LI('Using %(func)s as KVS region %(name)s key_mangler') if callable(key_mangler): self._region.key_mangler = key_mangler LOG.info(msg, {'func': key_mangler.__name__, 'name': self._region.name}) else: # NOTE(morganfainberg): We failed to set the key_mangler, # we should error out here to ensure we aren't causing # key-length or collision issues. raise exception.ValidationError( _('`key_mangler` option must be a function reference')) else: msg = _LI('Using default keystone.common.kvs.sha1_mangle_key ' 'as KVS region %s key_mangler') LOG.info(msg, self._region.name) # NOTE(morganfainberg): Use 'default' keymangler to ensure # that unless explicitly changed, we mangle keys. This helps # to limit unintended cases of exceeding cache-key in backends # such as memcache. self._region.key_mangler = sha1_mangle_key self._set_keymangler_on_backend(self._region.key_mangler) else: LOG.info(_LI('KVS region %s key_mangler disabled.'), self._region.name) self._set_keymangler_on_backend(None) def _configure_region(self, backend, **config_args): prefix = CONF.kvs.config_prefix conf_dict = {} conf_dict['%s.backend' % prefix] = backend if 'distributed_lock' not in config_args: config_args['distributed_lock'] = True config_args['lock_timeout'] = self._lock_timeout # NOTE(morganfainberg): To mitigate race conditions on comparing # the timeout and current time on the lock mutex, we are building # in a static 1 second overlap where the lock will still be valid # in the backend but not from the perspective of the context # manager. Since we must develop to the lowest-common-denominator # when it comes to the backends, memcache's cache store is not more # refined than 1 second, therefore we must build in at least a 1 # second overlap. `lock_timeout` of 0 means locks never expire. 
        # See NOTE above: pad positive timeouts by LOCK_WINDOW; a value of 0
        # means the lock never expires and is left untouched.
        if config_args['lock_timeout'] > 0:
            config_args['lock_timeout'] += LOCK_WINDOW

        # Flatten the kwargs into dogpile's '<prefix>.arguments.<name>'
        # configuration-dictionary form.
        for argument, value in config_args.items():
            arg_key = '.'.join([prefix, 'arguments', argument])
            conf_dict[arg_key] = value

        LOG.debug('KVS region configuration for %(name)s: %(config)r',
                  {'name': self._region.name, 'config': conf_dict})
        self._region.configure_from_config(conf_dict, '%s.' % prefix)

    def _mutex(self, key):
        # Return the mutex registered for this key, creating it lazily via
        # _create_mutex on first use.
        return self._lock_registry.get(key)

    def _create_mutex(self, key):
        # Prefer a mutex supplied by the backend (e.g. a distributed lock);
        # fall back to a process-local threading.Lock wrapper when the
        # backend returns None.
        mutex = self._region.backend.get_mutex(key)
        if mutex is not None:
            return mutex
        else:
            return self._LockWrapper(lock_timeout=self._lock_timeout)

    class _LockWrapper(object):
        """weakref-capable threading.Lock wrapper."""

        def __init__(self, lock_timeout):
            self.lock = threading.Lock()
            self.lock_timeout = lock_timeout

        def acquire(self, wait=True):
            # threading.Lock.acquire takes the blocking flag positionally.
            return self.lock.acquire(wait)

        def release(self):
            self.lock.release()

    def get(self, key):
        """Get a single value from the KVS backend.

        :param key: key to look up
        :returns: the stored value
        :raises keystone.exception.NotFound: if no value is stored for key
        """
        self._assert_configured()
        value = self._region.get(key)
        if value is NO_VALUE:
            raise exception.NotFound(target=key)
        return value

    def get_multi(self, keys):
        """Get multiple values in a single call from the KVS backend.

        :param keys: iterable of keys to look up
        :returns: list of values positionally matching ``keys``
        :raises keystone.exception.NotFound: if any key has no stored value
        """
        self._assert_configured()
        values = self._region.get_multi(keys)
        not_found = []
        for index, key in enumerate(keys):
            if values[index] is NO_VALUE:
                not_found.append(key)
        if not_found:
            # NOTE(morganfainberg): If any of the multi-get values are non-
            # existent, we should raise a NotFound error to mimic the .get()
            # method's behavior. In all cases the internal dogpile NO_VALUE
            # should be masked from the consumer of the KeyValueStore.
            raise exception.NotFound(target=not_found)
        return values

    def set(self, key, value, lock=None):
        """Set a single value in the KVS backend.

        :param lock: optional KeyValueStoreLock; when supplied it must be
                     active, unexpired, and locked on this same key
                     (validated by _action_with_lock).
        """
        self._assert_configured()
        with self._action_with_lock(key, lock):
            self._region.set(key, value)

    def set_multi(self, mapping):
        """Set multiple key/value pairs in the KVS backend at once.
Like delete_multi, this call does not serialize through the KeyValueStoreLock mechanism (locking cannot occur on more than one key in a given context without significant deadlock potential). """ self._assert_configured() self._region.set_multi(mapping) def delete(self, key, lock=None): """Delete a single key from the KVS backend. This method will raise NotFound if the key doesn't exist. The get and delete are done in a single transaction (via KeyValueStoreLock mechanism). """ self._assert_configured() with self._action_with_lock(key, lock): self.get(key) self._region.delete(key) def delete_multi(self, keys): """Delete multiple keys from the KVS backend in a single call. Like set_multi, this call does not serialize through the KeyValueStoreLock mechanism (locking cannot occur on more than one key in a given context without significant deadlock potential). """ self._assert_configured() self._region.delete_multi(keys) def get_lock(self, key): """Get a write lock on the KVS value referenced by `key`. The ability to get a context manager to pass into the set/delete methods allows for a single-transaction to occur while guaranteeing the backing store will not change between the start of the 'lock' and the end. Lock timeout is fixed to the KeyValueStore configured lock timeout. """ self._assert_configured() return KeyValueStoreLock(self._mutex(key), key, self.locking, self._lock_timeout) @contextlib.contextmanager def _action_with_lock(self, key, lock=None): """Wrapper context manager. Validates and handles the lock and lock timeout if passed in. """ if not isinstance(lock, KeyValueStoreLock): # NOTE(morganfainberg): Locking only matters if a lock is passed in # to this method. If lock isn't a KeyValueStoreLock, treat this as # if no locking needs to occur. 
            yield
        else:
            # A real lock was supplied: it must target this same key, still
            # be held (active), and not have outlived its timeout.
            if not lock.key == key:
                raise ValueError(_('Lock key must match target key: %(lock)s '
                                   '!= %(target)s') %
                                 {'lock': lock.key, 'target': key})
            if not lock.active:
                raise exception.ValidationError(_('Must be called within an '
                                                  'active lock context.'))
            if not lock.expired:
                yield
            else:
                raise LockTimeout(target=key)


class KeyValueStoreLock(object):
    """Basic KeyValueStoreLock context manager.

    Hooks into the dogpile.cache backend mutex allowing for distributed
    locking on resources. This is only a write lock, and will not prevent
    reads from occurring.
    """

    def __init__(self, mutex, key, locking_enabled=True, lock_timeout=0):
        self.mutex = mutex
        self.key = key
        self.enabled = locking_enabled
        self.lock_timeout = lock_timeout
        # 'active' flips to True in acquire() and is checked by
        # KeyValueStore._action_with_lock before any locked write.
        self.active = False
        self.acquire_time = 0

    def acquire(self):
        if self.enabled:
            self.mutex.acquire()
            LOG.debug('KVS lock acquired for: %s', self.key)
        # The context is marked active even when locking is disabled so that
        # _action_with_lock validation still passes.
        self.active = True
        self.acquire_time = time.time()
        return self

    __enter__ = acquire

    @property
    def expired(self):
        # A lock_timeout of 0 (falsy) means the lock can never expire.
        if self.lock_timeout:
            # LOCK_WINDOW mirrors the padding added to the backend's timeout
            # in KeyValueStore._configure_region, so the context manager
            # expires no later than the backend lock does.
            calculated = time.time() - self.acquire_time + LOCK_WINDOW
            return calculated > self.lock_timeout
        else:
            return False

    def release(self):
        if self.enabled:
            self.mutex.release()
            if not self.expired:
                LOG.debug('KVS lock released for: %s', self.key)
            else:
                LOG.warning(_LW('KVS lock released (timeout reached) for: %s'),
                            self.key)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()


def get_key_value_store(name, kvs_region=None):
    """Retrieve key value store.

    Instantiate a new :class:`.KeyValueStore` or return a previous
    instantiation that has the same name.
""" global KEY_VALUE_STORE_REGISTRY _register_backends() key_value_store = KEY_VALUE_STORE_REGISTRY.get(name) if key_value_store is None: if kvs_region is None: kvs_region = region.make_region(name=name) key_value_store = KeyValueStore(kvs_region) KEY_VALUE_STORE_REGISTRY[name] = key_value_store return key_value_store keystone-9.0.0/keystone/common/ldap/0000775000567000056710000000000012701407246020576 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/common/ldap/__init__.py0000664000567000056710000000117212701407102022677 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common.ldap.core import * # noqa keystone-9.0.0/keystone/common/ldap/core.py0000664000567000056710000023133112701407105022075 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import codecs import functools import os.path import re import sys import weakref import ldap.controls import ldap.filter import ldappool from oslo_log import log from oslo_utils import reflection import six from six.moves import map, zip from keystone.common import driver_hints from keystone import exception from keystone.i18n import _ from keystone.i18n import _LW LOG = log.getLogger(__name__) LDAP_VALUES = {'TRUE': True, 'FALSE': False} CONTROL_TREEDELETE = '1.2.840.113556.1.4.805' LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL, 'sub': ldap.SCOPE_SUBTREE} LDAP_DEREF = {'always': ldap.DEREF_ALWAYS, 'default': None, 'finding': ldap.DEREF_FINDING, 'never': ldap.DEREF_NEVER, 'searching': ldap.DEREF_SEARCHING} LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER, 'demand': ldap.OPT_X_TLS_DEMAND, 'allow': ldap.OPT_X_TLS_ALLOW} # RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to # indicate that no attributes should be returned besides the DN. DN_ONLY = ['1.1'] _utf8_encoder = codecs.getencoder('utf-8') def utf8_encode(value): """Encode a basestring to UTF-8. If the string is unicode encode it to UTF-8, if the string is str then assume it's already encoded. Otherwise raise a TypeError. :param value: A basestring :returns: UTF-8 encoded version of value :raises TypeError: If value is not basestring """ if isinstance(value, six.text_type): return _utf8_encoder(value)[0] elif isinstance(value, six.binary_type): return value else: value_cls_name = reflection.get_class_name( value, fully_qualified=False) raise TypeError("value must be basestring, " "not %s" % value_cls_name) _utf8_decoder = codecs.getdecoder('utf-8') def utf8_decode(value): """Decode a from UTF-8 into unicode. If the value is a binary string assume it's UTF-8 encoded and decode it into a unicode string. Otherwise convert the value from its type into a unicode string. 
:param value: value to be returned as unicode :returns: value as unicode :raises UnicodeDecodeError: for invalid UTF-8 encoding """ if isinstance(value, six.binary_type): return _utf8_decoder(value)[0] return six.text_type(value) def py2ldap(val): """Type convert a Python value to a type accepted by LDAP (unicode). The LDAP API only accepts strings for values therefore convert the value's type to a unicode string. A subsequent type conversion will encode the unicode as UTF-8 as required by the python-ldap API, but for now we just want a string representation of the value. :param val: The value to convert to a LDAP string representation :returns: unicode string representation of value. """ if isinstance(val, bool): return u'TRUE' if val else u'FALSE' else: return six.text_type(val) def enabled2py(val): """Similar to ldap2py, only useful for the enabled attribute.""" try: return LDAP_VALUES[val] except KeyError: # nosec # It wasn't a boolean value, will try as an int instead. pass try: return int(val) except ValueError: # nosec # It wasn't an int either, will try as utf8 instead. pass return utf8_decode(val) def ldap2py(val): """Convert an LDAP formatted value to Python type used by OpenStack. Virtually all LDAP values are stored as UTF-8 encoded strings. OpenStack prefers values which are unicode friendly. :param val: LDAP formatted value :returns: val converted to preferred Python type """ return utf8_decode(val) def convert_ldap_result(ldap_result): """Convert LDAP search result to Python types used by OpenStack. Each result tuple is of the form (dn, attrs), where dn is a string containing the DN (distinguished name) of the entry, and attrs is a dictionary containing the attributes associated with the entry. The keys of attrs are strings, and the associated values are lists of strings. OpenStack wants to use Python types of its choosing. Strings will be unicode, truth values boolean, whole numbers int's, etc. DN's will also be decoded from UTF-8 to unicode. 
    :param ldap_result: LDAP search result
    :returns: list of 2-tuples containing (dn, attrs) where dn is unicode
              and attrs is a dict whose values are type converted to
              OpenStack preferred types.
    """
    py_result = []
    at_least_one_referral = False
    for dn, attrs in ldap_result:
        ldap_attrs = {}
        if dn is None:
            # this is a Referral object, rather than an Entry object
            at_least_one_referral = True
            continue

        for kind, values in attrs.items():
            try:
                # The 'enabled' attribute needs boolean/int coercion; all
                # other attributes are simply UTF-8 decoded.
                val2py = enabled2py if kind == 'enabled' else ldap2py
                ldap_attrs[kind] = [val2py(x) for x in values]
            except UnicodeDecodeError:
                LOG.debug('Unable to decode value for attribute %s',
                          kind)

        py_result.append((utf8_decode(dn), ldap_attrs))
    if at_least_one_referral:
        LOG.debug(('Referrals were returned and ignored. Enable referral '
                   'chasing in keystone.conf via [ldap] chase_referrals'))

    return py_result


def safe_iter(attrs):
    """Iterate over attrs, yielding nothing for None and the value itself
    for a non-list.
    """
    if attrs is None:
        return
    elif isinstance(attrs, list):
        for e in attrs:
            yield e
    else:
        yield attrs


def parse_deref(opt):
    """Map an LDAP deref option name to its python-ldap constant.

    :raises ValueError: if opt is not one of the keys in LDAP_DEREF
    """
    try:
        return LDAP_DEREF[opt]
    except KeyError:
        raise ValueError(_('Invalid LDAP deref option: %(option)s. '
                           'Choose one of: %(options)s') %
                         {'option': opt,
                          'options': ', '.join(LDAP_DEREF.keys()), })


def parse_tls_cert(opt):
    """Map a TLS certificate-checking option name to its ldap constant.

    :raises ValueError: if opt is not one of the keys in LDAP_TLS_CERTS
    """
    try:
        return LDAP_TLS_CERTS[opt]
    except KeyError:
        raise ValueError(_(
            'Invalid LDAP TLS certs option: %(option)s. '
            'Choose one of: %(options)s') % {
                'option': opt,
                'options': ', '.join(LDAP_TLS_CERTS.keys())})


def ldap_scope(scope):
    """Map a search scope name ('one' or 'sub') to its ldap constant.

    :raises ValueError: if scope is not one of the keys in LDAP_SCOPES
    """
    try:
        return LDAP_SCOPES[scope]
    except KeyError:
        raise ValueError(
            _('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
                'scope': scope,
                'options': ', '.join(LDAP_SCOPES.keys())})


def prep_case_insensitive(value):
    """Prepare a string for case-insensitive comparison.

    This is defined in RFC4518. For simplicity, all this function does is
    lowercase all the characters, strip leading and trailing whitespace,
    and compress sequences of spaces to a single space.
""" value = re.sub(r'\s+', ' ', value.strip().lower()) return value def is_ava_value_equal(attribute_type, val1, val2): """Returns True if and only if the AVAs are equal. When comparing AVAs, the equality matching rule for the attribute type should be taken into consideration. For simplicity, this implementation does a case-insensitive comparison. Note that this function uses prep_case_insenstive so the limitations of that function apply here. """ return prep_case_insensitive(val1) == prep_case_insensitive(val2) def is_rdn_equal(rdn1, rdn2): """Returns True if and only if the RDNs are equal. * RDNs must have the same number of AVAs. * Each AVA of the RDNs must be the equal for the same attribute type. The order isn't significant. Note that an attribute type will only be in one AVA in an RDN, otherwise the DN wouldn't be valid. * Attribute types aren't case sensitive. Note that attribute type comparison is more complicated than implemented. This function only compares case-insentive. The code should handle multiple names for an attribute type (e.g., cn, commonName, and 2.5.4.3 are the same). Note that this function uses is_ava_value_equal to compare AVAs so the limitations of that function apply here. """ if len(rdn1) != len(rdn2): return False for attr_type_1, val1, dummy in rdn1: found = False for attr_type_2, val2, dummy in rdn2: if attr_type_1.lower() != attr_type_2.lower(): continue found = True if not is_ava_value_equal(attr_type_1, val1, val2): return False break if not found: return False return True def is_dn_equal(dn1, dn2): """Returns True if and only if the DNs are equal. Two DNs are equal if they've got the same number of RDNs and if the RDNs are the same at each position. See RFC4517. Note that this function uses is_rdn_equal to compare RDNs so the limitations of that function apply here. :param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn. :param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn. 
""" if not isinstance(dn1, list): dn1 = ldap.dn.str2dn(utf8_encode(dn1)) if not isinstance(dn2, list): dn2 = ldap.dn.str2dn(utf8_encode(dn2)) if len(dn1) != len(dn2): return False for rdn1, rdn2 in zip(dn1, dn2): if not is_rdn_equal(rdn1, rdn2): return False return True def dn_startswith(descendant_dn, dn): """Returns True if and only if the descendant_dn is under the dn. :param descendant_dn: Either a string DN or a DN parsed by ldap.dn.str2dn. :param dn: Either a string DN or a DN parsed by ldap.dn.str2dn. """ if not isinstance(descendant_dn, list): descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn)) if not isinstance(dn, list): dn = ldap.dn.str2dn(utf8_encode(dn)) if len(descendant_dn) <= len(dn): return False # Use the last len(dn) RDNs. return is_dn_equal(descendant_dn[-len(dn):], dn) @six.add_metaclass(abc.ABCMeta) class LDAPHandler(object): """Abstract class which defines methods for a LDAP API provider. Native Keystone values cannot be passed directly into and from the python-ldap API. Type conversion must occur at the LDAP API boudary, examples of type conversions are: * booleans map to the strings 'TRUE' and 'FALSE' * integer values map to their string representation. * unicode strings are encoded in UTF-8 In addition to handling type conversions at the API boundary we have the requirement to support more than one LDAP API provider. Currently we have: * python-ldap, this is the standard LDAP API for Python, it requires access to a live LDAP server. * Fake LDAP which emulates python-ldap. This is used for testing without requiring a live LDAP server. To support these requirements we need a layer that performs type conversions and then calls another LDAP API which is configurable (e.g. either python-ldap or the fake emulation). We have an additional constraint at the time of this writing due to limitations in the logging module. The logging module is not capable of accepting UTF-8 encoded strings, it will throw an encoding exception. 
Therefore all logging MUST be performed prior to UTF-8 conversion. This means no logging can be performed in the ldap APIs that implement the python-ldap API because those APIs are defined to accept only UTF-8 strings. Thus the layer which performs type conversions must also do the logging. We do the type conversions in two steps, once to convert all Python types to unicode strings, then log, then convert the unicode strings to UTF-8. There are a variety of ways one could accomplish this, we elect to use a chaining technique whereby instances of this class simply call the next member in the chain via the "conn" attribute. The chain is constructed by passing in an existing instance of this class as the conn attribute when the class is instantiated. Here is a brief explanation of why other possible approaches were not used: subclassing To perform the wrapping operations in the correct order the type convesion class would have to subclass each of the API providers. This is awkward, doubles the number of classes, and does not scale well. It requires the type conversion class to be aware of all possible API providers. decorators Decorators provide an elegant solution to wrap methods and would be an ideal way to perform type conversions before calling the wrapped function and then converting the values returned from the wrapped function. However decorators need to be aware of the method signature, it has to know what input parameters need conversion and how to convert the result. For an API like python-ldap which has a large number of different method signatures it would require a large number of specialized decorators. Experience has shown it's very easy to apply the wrong decorator due to the inherent complexity and tendency to cut-n-paste code. Another option is to parameterize the decorator to make it "smart". Experience has shown such decorators become insanely complicated and difficult to understand and debug. 
Also decorators tend to hide what's really going on when a method is called, the operations being performed are not visible when looking at the implemation of a decorated method, this too experience has shown leads to mistakes. Chaining simplifies both wrapping to perform type conversion as well as the substitution of alternative API providers. One simply creates a new instance of the API interface and insert it at the front of the chain. Type conversions are explicit and obvious. If a new method needs to be added to the API interface one adds it to the abstract class definition. Should one miss adding the new method to any derivations of the abstract class the code will fail to load and run making it impossible to forget updating all the derived classes. """ @abc.abstractmethod def __init__(self, conn=None): self.conn = conn @abc.abstractmethod def connect(self, url, page_size=0, alias_dereferencing=None, use_tls=False, tls_cacertfile=None, tls_cacertdir=None, tls_req_cert='demand', chase_referrals=None, debug_level=None, use_pool=None, pool_size=None, pool_retry_max=None, pool_retry_delay=None, pool_conn_timeout=None, pool_conn_lifetime=None): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def set_option(self, option, invalue): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_option(self, option): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def simple_bind_s(self, who='', cred='', serverctrls=None, clientctrls=None): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def unbind_s(self): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def add_s(self, dn, modlist): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def search_s(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def search_ext(self, base, scope, 
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
                resp_ctrl_classes=None):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def modify_s(self, dn, modlist):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_s(self, dn):
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
        raise exception.NotImplemented()  # pragma: no cover


class PythonLDAPHandler(LDAPHandler):
    """LDAPHandler implementation which calls the python-ldap API.

    Note, the python-ldap API requires all string values to be UTF-8
    encoded. The KeystoneLDAPHandler enforces this prior to invoking
    the methods in this class.
    """

    def __init__(self, conn=None):
        super(PythonLDAPHandler, self).__init__(conn=conn)

    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None, pool_retry_max=None,
                pool_retry_delay=None, pool_conn_timeout=None,
                pool_conn_lifetime=None):
        # Validate URL/TLS configuration and set process-global TLS options
        # before the connection object is created.
        _common_ldap_initialization(url=url,
                                    use_tls=use_tls,
                                    tls_cacertfile=tls_cacertfile,
                                    tls_cacertdir=tls_cacertdir,
                                    tls_req_cert=tls_req_cert,
                                    debug_level=debug_level)

        self.conn = ldap.initialize(url)
        self.conn.protocol_version = ldap.VERSION3

        # NOTE: option ordering matters here - dereferencing is applied
        # before the TLS handshake, and referral chasing after it.
        if alias_dereferencing is not None:
            self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
        self.page_size = page_size

        if use_tls:
            self.conn.start_tls_s()

        if chase_referrals is not None:
            self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))

    def set_option(self, option, invalue):
        # Delegate straight to the underlying python-ldap connection.
        return self.conn.set_option(option, invalue)

    def get_option(self, option):
        return self.conn.get_option(option)

    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)

    def unbind_s(self):
        return self.conn.unbind_s()

    def add_s(self, dn, modlist):
        return self.conn.add_s(dn, modlist)

    def search_s(self, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        return self.conn.search_s(base, scope, filterstr,
                                  attrlist, attrsonly)

    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        return self.conn.search_ext(base, scope,
                                    filterstr, attrlist, attrsonly,
                                    serverctrls, clientctrls,
                                    timeout, sizelimit)

    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
                resp_ctrl_classes=None):
        # The resp_ctrl_classes parameter is a recent addition to the
        # API. It defaults to None. We do not anticipate using it.
        # To run with older versions of python-ldap we do not pass it.
        return self.conn.result3(msgid, all, timeout)

    def modify_s(self, dn, modlist):
        return self.conn.modify_s(dn, modlist)

    def delete_s(self, dn):
        return self.conn.delete_s(dn)

    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
        return self.conn.delete_ext_s(dn, serverctrls, clientctrls)


def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
                                tls_cacertdir=None, tls_req_cert=None,
                                debug_level=None):
    """LDAP initialization for PythonLDAPHandler and PooledLDAPHandler.

    Performs sanity checks on the URL/TLS combination and applies the
    TLS certificate options globally on the ldap module (python-ldap
    ignores these options when set on an individual connection).
    """
    LOG.debug("LDAP init: url=%s", url)
    LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
              'tls_req_cert=%s tls_avail=%s',
              use_tls, tls_cacertfile, tls_cacertdir,
              tls_req_cert, ldap.TLS_AVAIL)

    if debug_level is not None:
        ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)

    using_ldaps = url.lower().startswith("ldaps")

    # StartTLS on an ldaps:// URL would be TLS-over-TLS; reject it.
    if use_tls and using_ldaps:
        raise AssertionError(_('Invalid TLS / LDAPS combination'))

    # The certificate trust options apply for both LDAPS and TLS.
    if use_tls or using_ldaps:
        if not ldap.TLS_AVAIL:
            raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
                               'not available') % ldap.TLS_AVAIL)
        if tls_cacertfile:
            # NOTE(topol)
            # python ldap TLS does not verify CACERTFILE or CACERTDIR
            # so we add some extra simple sanity check verification
            # Also, setting these values globally (i.e. on the ldap object)
            # works but these values are ignored when setting them on the
            # connection
            if not os.path.isfile(tls_cacertfile):
                raise IOError(_("tls_cacertfile %s not found "
                                "or is not a file") %
                              tls_cacertfile)
            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
        elif tls_cacertdir:
            # NOTE(topol)
            # python ldap TLS does not verify CACERTFILE or CACERTDIR
            # so we add some extra simple sanity check verification
            # Also, setting these values globally (i.e. on the ldap object)
            # works but these values are ignored when setting them on the
            # connection
            if not os.path.isdir(tls_cacertdir):
                raise IOError(_("tls_cacertdir %s not found "
                                "or is not a directory") %
                              tls_cacertdir)
            ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
        if tls_req_cert in list(LDAP_TLS_CERTS.values()):
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
        else:
            # Unknown value: log and keep the library default rather
            # than failing the connection attempt.
            LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
                      tls_req_cert)


class MsgId(list):
    """Wrapper class to hold connection and msgid."""

    pass


def use_conn_pool(func):
    """Use this only for connection pool specific ldap API.

    This adds connection object to decorated API as next argument after self.

    """
    def wrapper(self, *args, **kwargs):
        # assert isinstance(self, PooledLDAPHandler)
        with self._get_pool_connection() as conn:
            self._apply_options(conn)
            return func(self, conn, *args, **kwargs)

    return wrapper


class PooledLDAPHandler(LDAPHandler):
    """LDAPHandler implementation which uses pooled connection manager.

    Pool specific configuration is defined in [ldap] section.
    All other LDAP configuration is still used from [ldap] section

    Keystone LDAP authentication logic authenticates an end user using its DN
    and password via LDAP bind to establish supplied password is correct.
    This can fill up the pool quickly (as pool re-uses existing connection
    based on its bind data) and would not leave space in pool for connection
    re-use for other LDAP operations.
    Now a separate pool can be established for those requests when related
    flag 'use_auth_pool' is enabled. That pool can have its own size and
    connection lifetime. Other pool attributes are shared between those pools.
    If 'use_pool' is disabled, then 'use_auth_pool' does not matter.
    If 'use_auth_pool' is not enabled, then connection pooling is not used for
    those LDAP operations.

    Note, the python-ldap API requires all string values to be UTF-8
    encoded. The KeystoneLDAPHandler enforces this prior to invoking
    the methods in this class.
    """

    # Added here to allow override for testing
    Connector = ldappool.StateConnector
    auth_pool_prefix = 'auth_pool_'

    # Pools are shared process-wide across handler instances, keyed by
    # (possibly prefixed) URL.
    connection_pools = {}  # static connector pool dict

    def __init__(self, conn=None, use_auth_pool=False):
        super(PooledLDAPHandler, self).__init__(conn=conn)
        # Bind identity captured by simple_bind_s(); ldappool keys
        # connections on (who, cred).
        self.who = ''
        self.cred = ''
        self.conn_options = {}  # connection specific options
        self.page_size = None
        self.use_auth_pool = use_auth_pool
        self.conn_pool = None

    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None, pool_retry_max=None,
                pool_retry_delay=None, pool_conn_timeout=None,
                pool_conn_lifetime=None):
        _common_ldap_initialization(url=url,
                                    use_tls=use_tls,
                                    tls_cacertfile=tls_cacertfile,
                                    tls_cacertdir=tls_cacertdir,
                                    tls_req_cert=tls_req_cert,
                                    debug_level=debug_level)

        self.page_size = page_size

        # Following two options are not added in common initialization as they
        # need to follow a sequence in PythonLDAPHandler code.
        if alias_dereferencing is not None:
            self.set_option(ldap.OPT_DEREF, alias_dereferencing)
        if chase_referrals is not None:
            self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))

        if self.use_auth_pool:  # separate pool when use_auth_pool enabled
            pool_url = self.auth_pool_prefix + url
        else:
            pool_url = url
        try:
            self.conn_pool = self.connection_pools[pool_url]
        except KeyError:
            # First connect for this URL in this process: build the pool
            # and publish it for subsequent handler instances.
            self.conn_pool = ldappool.ConnectionManager(
                url,
                size=pool_size,
                retry_max=pool_retry_max,
                retry_delay=pool_retry_delay,
                timeout=pool_conn_timeout,
                connector_cls=self.Connector,
                use_tls=use_tls,
                max_lifetime=pool_conn_lifetime)
            self.connection_pools[pool_url] = self.conn_pool

    def set_option(self, option, invalue):
        # Options are recorded locally and applied lazily to each pooled
        # connection in _apply_options().
        self.conn_options[option] = invalue

    def get_option(self, option):
        value = self.conn_options.get(option)
        # if option was not specified explicitly, then use connection default
        # value for that option if there.
        if value is None:
            with self._get_pool_connection() as conn:
                value = conn.get_option(option)
        return value

    def _apply_options(self, conn):
        # if connection has a lifetime, then it already has options specified
        # NOTE(review): the 30 threshold assumes ldappool reports lifetime
        # in seconds and that a >30s-old connection was already configured -
        # confirm against the ldappool documentation.
        if conn.get_lifetime() > 30:
            return
        for option, invalue in self.conn_options.items():
            conn.set_option(option, invalue)

    def _get_pool_connection(self):
        return self.conn_pool.connection(self.who, self.cred)

    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        # Not using use_conn_pool decorator here as this API takes cred as
        # input.
        self.who = who
        self.cred = cred
        # Acquiring a pooled connection performs the actual bind with the
        # stored credentials; a bind failure raises out of this block.
        with self._get_pool_connection() as conn:
            self._apply_options(conn)

    def unbind_s(self):
        # After connection generator is done `with` statement execution block
        # connection is always released via finally block in ldappool.
        # So this unbind is a no op.
        pass

    @use_conn_pool
    def add_s(self, conn, dn, modlist):
        return conn.add_s(dn, modlist)

    @use_conn_pool
    def search_s(self, conn, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        return conn.search_s(base, scope, filterstr, attrlist,
                             attrsonly)

    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        """Asynchronous API to return a ``MsgId`` instance.

        The ``MsgId`` instance can be safely used in a call to ``result3()``.

        To work with ``result3()`` API in predictable manner, the same LDAP
        connection is needed which originally provided the ``msgid``. So, this
        method wraps the existing connection and ``msgid`` in a new ``MsgId``
        instance. The connection associated with ``search_ext`` is released
        once last hard reference to the ``MsgId`` instance is freed.

        """
        conn_ctxt = self._get_pool_connection()
        conn = conn_ctxt.__enter__()
        try:
            msgid = conn.search_ext(base, scope, filterstr, attrlist,
                                    attrsonly, serverctrls, clientctrls,
                                    timeout, sizelimit)
        except Exception:
            conn_ctxt.__exit__(*sys.exc_info())
            raise
        res = MsgId((conn, msgid))
        # Release the pooled connection (via the context manager's __exit__)
        # only when the MsgId wrapper is garbage collected.
        weakref.ref(res, functools.partial(conn_ctxt.__exit__,
                                           None, None, None))
        return res

    def result3(self, msgid, all=1, timeout=None,
                resp_ctrl_classes=None):
        """This method is used to wait for and return result.

        This method returns the result of an operation previously initiated by
        one of the LDAP asynchronous operation routines (eg search_ext()). It
        returned an invocation identifier (a message id) upon successful
        initiation of their operation.

        Input msgid is expected to be instance of class MsgId which has LDAP
        session/connection used to execute search_ext and message identifier.

        The connection associated with search_ext is released once last hard
        reference to MsgId object is freed. This will happen when function
        which requested msgId and used it in result3 exits.

        """
        conn, msg_id = msgid
        return conn.result3(msg_id, all, timeout)

    @use_conn_pool
    def modify_s(self, conn, dn, modlist):
        return conn.modify_s(dn, modlist)

    @use_conn_pool
    def delete_s(self, conn, dn):
        return conn.delete_s(dn)

    @use_conn_pool
    def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
        return conn.delete_ext_s(dn, serverctrls, clientctrls)


class KeystoneLDAPHandler(LDAPHandler):
    """Convert data types and perform logging.

    This LDAP interface wraps the python-ldap based interfaces. The
    python-ldap interfaces require string values encoded in UTF-8. The
    OpenStack logging framework at the time of this writing is not
    capable of accepting strings encoded in UTF-8, the log functions
    will throw decoding errors if a non-ascii character appears in a
    string.

    Prior to the call Python data types are converted to a string
    representation as required by the LDAP APIs.

    Then logging is performed so we can track what is being
    sent/received from LDAP. Also the logging filters security
    sensitive items (i.e. passwords).

    Then the string values are encoded into UTF-8.

    Then the LDAP API entry point is invoked.

    Data returned from the LDAP call is converted back from UTF-8
    encoded strings into the Python data type used internally in
    OpenStack.
""" def __init__(self, conn=None): super(KeystoneLDAPHandler, self).__init__(conn=conn) self.page_size = 0 def __enter__(self): return self def _disable_paging(self): # Disable the pagination from now on self.page_size = 0 def connect(self, url, page_size=0, alias_dereferencing=None, use_tls=False, tls_cacertfile=None, tls_cacertdir=None, tls_req_cert='demand', chase_referrals=None, debug_level=None, use_pool=None, pool_size=None, pool_retry_max=None, pool_retry_delay=None, pool_conn_timeout=None, pool_conn_lifetime=None): self.page_size = page_size return self.conn.connect(url, page_size, alias_dereferencing, use_tls, tls_cacertfile, tls_cacertdir, tls_req_cert, chase_referrals, debug_level=debug_level, use_pool=use_pool, pool_size=pool_size, pool_retry_max=pool_retry_max, pool_retry_delay=pool_retry_delay, pool_conn_timeout=pool_conn_timeout, pool_conn_lifetime=pool_conn_lifetime) def set_option(self, option, invalue): return self.conn.set_option(option, invalue) def get_option(self, option): return self.conn.get_option(option) def simple_bind_s(self, who='', cred='', serverctrls=None, clientctrls=None): LOG.debug("LDAP bind: who=%s", who) who_utf8 = utf8_encode(who) cred_utf8 = utf8_encode(cred) return self.conn.simple_bind_s(who_utf8, cred_utf8, serverctrls=serverctrls, clientctrls=clientctrls) def unbind_s(self): LOG.debug("LDAP unbind") return self.conn.unbind_s() def add_s(self, dn, modlist): ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)]) for kind, values in modlist] logging_attrs = [(kind, values if kind != 'userPassword' else ['****']) for kind, values in ldap_attrs] LOG.debug('LDAP add: dn=%s attrs=%s', dn, logging_attrs) dn_utf8 = utf8_encode(dn) ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)]) for kind, values in ldap_attrs] return self.conn.add_s(dn_utf8, ldap_attrs_utf8) def search_s(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0): # NOTE(morganfainberg): Remove "None" singletons from 
this list, which # allows us to set mapped attributes to "None" as defaults in config. # Without this filtering, the ldap query would raise a TypeError since # attrlist is expected to be an iterable of strings. if attrlist is not None: attrlist = [attr for attr in attrlist if attr is not None] LOG.debug('LDAP search: base=%s scope=%s filterstr=%s ' 'attrs=%s attrsonly=%s', base, scope, filterstr, attrlist, attrsonly) if self.page_size: ldap_result = self._paged_search_s(base, scope, filterstr, attrlist) else: base_utf8 = utf8_encode(base) filterstr_utf8 = utf8_encode(filterstr) if attrlist is None: attrlist_utf8 = None else: attrlist_utf8 = list(map(utf8_encode, attrlist)) ldap_result = self.conn.search_s(base_utf8, scope, filterstr_utf8, attrlist_utf8, attrsonly) py_result = convert_ldap_result(ldap_result) return py_result def search_ext(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, serverctrls=None, clientctrls=None, timeout=-1, sizelimit=0): if attrlist is not None: attrlist = [attr for attr in attrlist if attr is not None] LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s ' 'attrs=%s attrsonly=%s ' 'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s', base, scope, filterstr, attrlist, attrsonly, serverctrls, clientctrls, timeout, sizelimit) return self.conn.search_ext(base, scope, filterstr, attrlist, attrsonly, serverctrls, clientctrls, timeout, sizelimit) def _paged_search_s(self, base, scope, filterstr, attrlist=None): res = [] use_old_paging_api = False # The API for the simple paged results control changed between # python-ldap 2.3 and 2.4. We need to detect the capabilities # of the python-ldap version we are using. 
        if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
            # python-ldap 2.3 style control.
            use_old_paging_api = True
            lc = ldap.controls.SimplePagedResultsControl(
                controlType=ldap.LDAP_CONTROL_PAGE_OID,
                criticality=True,
                controlValue=(self.page_size, ''))
            page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
        else:
            # python-ldap 2.4+ style control.
            lc = ldap.controls.libldap.SimplePagedResultsControl(
                criticality=True,
                size=self.page_size,
                cookie='')
            page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType

        base_utf8 = utf8_encode(base)
        filterstr_utf8 = utf8_encode(filterstr)
        if attrlist is None:
            attrlist_utf8 = None
        else:
            attrlist = [attr for attr in attrlist if attr is not None]
            attrlist_utf8 = list(map(utf8_encode, attrlist))
        msgid = self.conn.search_ext(base_utf8,
                                     scope,
                                     filterstr_utf8,
                                     attrlist_utf8,
                                     serverctrls=[lc])
        # Endless loop request pages on ldap server until it has no data
        while True:
            # Request to the ldap server a page with 'page_size' entries
            rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
            # Receive the data
            res.extend(rdata)
            pctrls = [c for c in serverctrls
                      if c.controlType == page_ctrl_oid]
            if pctrls:
                # LDAP server supports pagination
                if use_old_paging_api:
                    est, cookie = pctrls[0].controlValue
                    lc.controlValue = (self.page_size, cookie)
                else:
                    cookie = lc.cookie = pctrls[0].cookie

                if cookie:
                    # There is more data still on the server
                    # so we request another page
                    msgid = self.conn.search_ext(base_utf8,
                                                 scope,
                                                 filterstr_utf8,
                                                 attrlist_utf8,
                                                 serverctrls=[lc])
                else:
                    # Exit condition no more data on server
                    break
            else:
                LOG.warning(_LW('LDAP Server does not support paging. '
                                'Disable paging in keystone.conf to '
                                'avoid this message.'))
                self._disable_paging()
                break
        return res

    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
                resp_ctrl_classes=None):
        ldap_result = self.conn.result3(msgid, all, timeout,
                                        resp_ctrl_classes)

        LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
                  'resp_ctrl_classes=%s ldap_result=%s', msgid, all,
                  timeout, resp_ctrl_classes, ldap_result)

        # ldap_result returned from result3 is a tuple of
        # (rtype, rdata, rmsgid, serverctrls). We don't need use of these,
        # except rdata.
        rtype, rdata, rmsgid, serverctrls = ldap_result
        py_result = convert_ldap_result(rdata)
        return py_result

    def modify_s(self, dn, modlist):
        # Same convert/mask/encode pipeline as add_s(), but entries carry
        # an (op, kind, values) triple and values may be None for deletes.
        ldap_modlist = [
            (op, kind, (None if values is None
                        else [py2ldap(x) for x in safe_iter(values)]))
            for op, kind, values in modlist]

        logging_modlist = [(op, kind, (values if kind != 'userPassword'
                           else ['****']))
                           for op, kind, values in ldap_modlist]
        LOG.debug('LDAP modify: dn=%s modlist=%s',
                  dn, logging_modlist)

        dn_utf8 = utf8_encode(dn)
        ldap_modlist_utf8 = [
            (op, kind, (None if values is None
                        else [utf8_encode(x) for x in safe_iter(values)]))
            for op, kind, values in ldap_modlist]
        return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)

    def delete_s(self, dn):
        LOG.debug("LDAP delete: dn=%s", dn)
        dn_utf8 = utf8_encode(dn)
        return self.conn.delete_s(dn_utf8)

    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
        LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
                  dn, serverctrls, clientctrls)
        dn_utf8 = utf8_encode(dn)
        return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.unbind_s()


# Registry of URL-prefix -> LDAPHandler factory, consulted by
# _get_connection() before falling back to the built-in handlers.
_HANDLERS = {}


def register_handler(prefix, handler):
    """Register a custom LDAPHandler factory for URLs with this prefix."""
    _HANDLERS[prefix] = handler


def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
    """Return an LDAPHandler appropriate for the given URL and pool flags."""
    for prefix, handler in _HANDLERS.items():
        if conn_url.startswith(prefix):
            return handler()

    if use_pool:
        return PooledLDAPHandler(use_auth_pool=use_auth_pool)
    else:
        return PythonLDAPHandler()


def filter_entity(entity_ref):
    """Filter out private items in an entity dict.

    :param entity_ref: the entity dictionary. The 'dn' field will be
                       removed. 'dn' is used in LDAP, but should not be
                       returned to the user. This value may be modified.

    :returns: entity_ref

    """
    if entity_ref:
        entity_ref.pop('dn', None)
    return entity_ref


class BaseLdap(object):
    # Per-driver defaults; subclasses override these class attributes.
    DEFAULT_OU = None
    DEFAULT_STRUCTURAL_CLASSES = None
    DEFAULT_ID_ATTR = 'cn'
    DEFAULT_OBJECTCLASS = None
    DEFAULT_FILTER = None
    DEFAULT_EXTRA_ATTR_MAPPING = []
    DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
    NotFound = None
    notfound_arg = None
    options_name = None
    model = None
    attribute_options_names = {}
    immutable_attrs = []
    attribute_ignore = []
    tree_dn = None

    def __init__(self, conf):
        self.LDAP_URL = conf.ldap.url
        self.LDAP_USER = conf.ldap.user
        self.LDAP_PASSWORD = conf.ldap.password
        self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
        self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
        self.page_size = conf.ldap.page_size
        self.use_tls = conf.ldap.use_tls
        self.tls_cacertfile = conf.ldap.tls_cacertfile
        self.tls_cacertdir = conf.ldap.tls_cacertdir
        self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
        self.attribute_mapping = {}
        self.chase_referrals = conf.ldap.chase_referrals
        self.debug_level = conf.ldap.debug_level

        # LDAP Pool specific attribute
        self.use_pool = conf.ldap.use_pool
        self.pool_size = conf.ldap.pool_size
        self.pool_retry_max = conf.ldap.pool_retry_max
        self.pool_retry_delay = conf.ldap.pool_retry_delay
        self.pool_conn_timeout = conf.ldap.pool_connection_timeout
        self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime

        # End user authentication pool specific config attributes
        self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
        self.auth_pool_size = conf.ldap.auth_pool_size
        self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime

        if self.options_name is not None:
            self.suffix = conf.ldap.suffix
            dn = '%s_tree_dn' % self.options_name
            self.tree_dn = (getattr(conf.ldap, dn) or
                            '%s,%s' % (self.DEFAULT_OU, self.suffix))

            idatt = '%s_id_attribute' % self.options_name
            self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR

            objclass = '%s_objectclass' % self.options_name
            self.object_class = (getattr(conf.ldap, objclass) or
                                 self.DEFAULT_OBJECTCLASS)

            for k, v in self.attribute_options_names.items():
                v = '%s_%s_attribute' % (self.options_name, v)
                self.attribute_mapping[k] = getattr(conf.ldap, v)

            attr_mapping_opt = ('%s_additional_attribute_mapping' %
                                self.options_name)
            attr_mapping = (getattr(conf.ldap, attr_mapping_opt) or
                            self.DEFAULT_EXTRA_ATTR_MAPPING)
            self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)

            ldap_filter = '%s_filter' % self.options_name
            self.ldap_filter = getattr(conf.ldap,
                                       ldap_filter) or self.DEFAULT_FILTER

            allow_create = '%s_allow_create' % self.options_name
            self.allow_create = getattr(conf.ldap, allow_create)

            allow_update = '%s_allow_update' % self.options_name
            self.allow_update = getattr(conf.ldap, allow_update)

            allow_delete = '%s_allow_delete' % self.options_name
            self.allow_delete = getattr(conf.ldap, allow_delete)

            member_attribute = '%s_member_attribute' % self.options_name
            self.member_attribute = getattr(conf.ldap, member_attribute, None)

            self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES

            if self.notfound_arg is None:
                self.notfound_arg = self.options_name + '_id'

            attribute_ignore = '%s_attribute_ignore' % self.options_name
            self.attribute_ignore = getattr(conf.ldap, attribute_ignore)

        self.use_dumb_member = conf.ldap.use_dumb_member
        self.dumb_member = (conf.ldap.dumb_member or self.DUMB_MEMBER_DN)

        self.subtree_delete_enabled = conf.ldap.allow_subtree_delete

    def _not_found(self, object_id):
        # Build (not raise) the driver-specific NotFound exception.
        if self.NotFound is None:
            return exception.NotFound(target=object_id)
        else:
            return self.NotFound(**{self.notfound_arg: object_id})

    def _parse_extra_attrs(self, option_list):
        # Parse 'ldap_attr:keystone_attr' strings into a mapping dict;
        # malformed entries are logged and skipped, not fatal.
        mapping = {}
        for item in option_list:
            try:
                ldap_attr, attr_map = item.split(':')
            except Exception:
                LOG.warning(_LW(
                    'Invalid additional attribute mapping: "%s". '
                    'Format must be <ldap_attribute>:<keystone_attribute>'),
                    item)
                continue
            mapping[ldap_attr] = attr_map
        return mapping

    def _is_dumb_member(self, member_dn):
        """Checks that member is a dumb member.

        :param member_dn: DN of member to be checked.
        """
        return (self.use_dumb_member
                and is_dn_equal(member_dn, self.dumb_member))

    def get_connection(self, user=None, password=None, end_user_auth=False):
        """Return a bound KeystoneLDAPHandler wrapping the raw handler.

        :param user: bind DN; defaults to the configured service user.
        :param password: bind password; defaults to the configured one.
        :param end_user_auth: True when binding as an end user, which
                              selects the separate auth pool settings.
        """
        use_pool = self.use_pool
        pool_size = self.pool_size
        pool_conn_lifetime = self.pool_conn_lifetime
        if end_user_auth:
            if not self.use_auth_pool:
                use_pool = False
            else:
                pool_size = self.auth_pool_size
                pool_conn_lifetime = self.auth_pool_conn_lifetime

        conn = _get_connection(self.LDAP_URL, use_pool,
                               use_auth_pool=end_user_auth)

        conn = KeystoneLDAPHandler(conn=conn)

        conn.connect(self.LDAP_URL,
                     page_size=self.page_size,
                     alias_dereferencing=self.alias_dereferencing,
                     use_tls=self.use_tls,
                     tls_cacertfile=self.tls_cacertfile,
                     tls_cacertdir=self.tls_cacertdir,
                     tls_req_cert=self.tls_req_cert,
                     chase_referrals=self.chase_referrals,
                     debug_level=self.debug_level,
                     use_pool=use_pool,
                     pool_size=pool_size,
                     pool_retry_max=self.pool_retry_max,
                     pool_retry_delay=self.pool_retry_delay,
                     pool_conn_timeout=self.pool_conn_timeout,
                     pool_conn_lifetime=pool_conn_lifetime
                     )

        if user is None:
            user = self.LDAP_USER

        if password is None:
            password = self.LDAP_PASSWORD

        # not all LDAP servers require authentication, so we don't bind
        # if we don't have any user/pass
        if user and password:
            conn.simple_bind_s(user, password)

        return conn

    def _id_to_dn_string(self, object_id):
        # Construct the DN purely from configuration; no LDAP round trip.
        return u'%s=%s,%s' % (self.id_attr,
                              ldap.dn.escape_dn_chars(
                                  six.text_type(object_id)),
                              self.tree_dn)

    def _id_to_dn(self, object_id):
        # For one-level scope the DN is fully determined by config; for
        # subtree scope the entry may live deeper, so search for it.
        if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
            return self._id_to_dn_string(object_id)
        with self.get_connection() as conn:
            search_result = conn.search_s(
                self.tree_dn, self.LDAP_SCOPE,
                u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
                {'id_attr': self.id_attr,
                 'id': ldap.filter.escape_filter_chars(
                     six.text_type(object_id)),
                 'objclass': self.object_class},
                attrlist=DN_ONLY)
        if search_result:
            dn, attrs = search_result[0]
            return dn
        else:
            # Not found by search; fall back to the constructed DN.
            return self._id_to_dn_string(object_id)

    @staticmethod
    def _dn_to_id(dn):
        # The ID is the attribute value of the first RDN component.
        return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])

    def _ldap_res_to_model(self, res):
        # LDAP attribute names may be returned in a different case than
        # they are defined in the mapping, so we need to check for keys
        # in a case-insensitive way.  We use the case specified in the
        # mapping for the model to ensure we have a predictable way of
        # retrieving values later.
        lower_res = {k.lower(): v for k, v in res[1].items()}

        id_attrs = lower_res.get(self.id_attr.lower())
        if not id_attrs:
            message = _('ID attribute %(id_attr)s not found in LDAP '
                        'object %(dn)s') % ({'id_attr': self.id_attr,
                                             'dn': res[0]})
            raise exception.NotFound(message=message)
        if len(id_attrs) > 1:
            # FIXME(gyee): if this is a multi-value attribute and it has
            # multiple values, we can't use it as ID. Retain the dn_to_id
            # logic here so it does not potentially break existing
            # deployments. We need to fix our read-write LDAP logic so
            # it does not get the ID from DN.
            message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
                          'has multiple values and therefore cannot be used '
                          'as an ID. Will get the ID from DN instead') % (
                              {'id_attr': self.id_attr,
                               'dn': res[0]})
            LOG.warning(message)
            id_val = self._dn_to_id(res[0])
        else:
            id_val = id_attrs[0]
        obj = self.model(id=id_val)

        for k in obj.known_keys:
            if k in self.attribute_ignore:
                continue

            try:
                map_attr = self.attribute_mapping.get(k, k)
                if map_attr is None:
                    # Ignore attributes that are mapped to None.
                    continue

                v = lower_res[map_attr.lower()]
            except KeyError:  # nosec
                # Didn't find the attr, so don't add it.
                pass
            else:
                try:
                    # LDAP values are lists; the model stores the first one.
                    obj[k] = v[0]
                except IndexError:
                    obj[k] = None

        return obj

    def check_allow_create(self):
        if not self.allow_create:
            action = _('LDAP %s create') % self.options_name
            raise exception.ForbiddenAction(action=action)

    def check_allow_update(self):
        if not self.allow_update:
            action = _('LDAP %s update') % self.options_name
            raise exception.ForbiddenAction(action=action)

    def check_allow_delete(self):
        if not self.allow_delete:
            action = _('LDAP %s delete') % self.options_name
            raise exception.ForbiddenAction(action=action)

    def affirm_unique(self, values):
        """Raise Conflict if an entity with this name or id already exists."""
        if values.get('name') is not None:
            try:
                self.get_by_name(values['name'])
            except exception.NotFound:  # nosec
                # Didn't find it so it's unique, good.
                pass
            else:
                raise exception.Conflict(type=self.options_name,
                                         details=_('Duplicate name, %s.') %
                                         values['name'])

        if values.get('id') is not None:
            try:
                self.get(values['id'])
            except exception.NotFound:  # nosec
                # Didn't find it, so it's unique, good.
                pass
            else:
                raise exception.Conflict(type=self.options_name,
                                         details=_('Duplicate ID, %s.') %
                                         values['id'])

    def create(self, values):
        """Create an LDAP entry from a dict of model values."""
        self.affirm_unique(values)
        object_classes = self.structural_classes + [self.object_class]
        attrs = [('objectClass', object_classes)]
        for k, v in values.items():
            if k in self.attribute_ignore:
                continue
            if k == 'id':
                # no need to check if v is None as 'id' will always have
                # a value
                attrs.append((self.id_attr, [v]))
            elif v is not None:
                attr_type = self.attribute_mapping.get(k, k)
                if attr_type is not None:
                    attrs.append((attr_type, [v]))
                # Also write the value to any additional mapped attributes.
                extra_attrs = [attr for attr, name
                               in self.extra_attr_mapping.items()
                               if name == k]
                for attr in extra_attrs:
                    attrs.append((attr, [v]))

        # groupOfNames requires at least one member; seed with the dumb
        # member placeholder when configured.
        if 'groupOfNames' in object_classes and self.use_dumb_member:
            attrs.append(('member', [self.dumb_member]))
        with self.get_connection() as conn:
            conn.add_s(self._id_to_dn(values['id']), attrs)
        return values

    def _ldap_get(self, object_id, ldap_filter=None):
        query = (u'(&(%(id_attr)s=%(id)s)'
                 u'%(filter)s'
                 u'(objectClass=%(object_class)s))' %
                 {'id_attr': self.id_attr,
                  'id': ldap.filter.escape_filter_chars(
                      six.text_type(object_id)),
                  'filter': (ldap_filter or self.ldap_filter or ''),
                  'object_class': self.object_class})
        with self.get_connection() as conn:
            try:
                # Request only the attributes the model can consume.
                attrs = list(set(([self.id_attr] +
                                  list(self.attribute_mapping.values()) +
                                  list(self.extra_attr_mapping.keys()))))
                res = conn.search_s(self.tree_dn,
                                    self.LDAP_SCOPE,
                                    query,
                                    attrs)
            except ldap.NO_SUCH_OBJECT:
                return None
        try:
            return res[0]
        except IndexError:
            return None

    def _ldap_get_limited(self, base, scope, filterstr, attrlist, sizelimit):
        # Use the simple paged results control to cap the number of
        # entries returned by the server.
        with self.get_connection() as conn:
            try:
                control = ldap.controls.libldap.SimplePagedResultsControl(
                    criticality=True,
                    size=sizelimit,
                    cookie='')
                msgid = conn.search_ext(base, scope, filterstr, attrlist,
                                        serverctrls=[control])
                rdata = conn.result3(msgid)
                return rdata
            except ldap.NO_SUCH_OBJECT:
                return []

    @driver_hints.truncated
    def _ldap_get_all(self, hints, ldap_filter=None):
        """Return all matching raw LDAP entries, honoring any limit hint."""
        query = u'(&%s(objectClass=%s)(%s=*))' % (
            ldap_filter or self.ldap_filter or '',
            self.object_class,
            self.id_attr)
        sizelimit = 0
        attrs = list(set(([self.id_attr] +
                          list(self.attribute_mapping.values()) +
                          list(self.extra_attr_mapping.keys()))))
        if hints.limit:
            sizelimit = hints.limit['limit']
            return self._ldap_get_limited(self.tree_dn,
                                          self.LDAP_SCOPE,
                                          query,
                                          attrs,
                                          sizelimit)
        with self.get_connection() as conn:
            try:
                return conn.search_s(self.tree_dn,
                                     self.LDAP_SCOPE,
                                     query,
                                     attrs)
            except ldap.NO_SUCH_OBJECT:
                return []

    def _ldap_get_list(self, search_base, scope, query_params=None,
                       attrlist=None):
        query = u'(objectClass=%s)' % self.object_class
        if query_params:

            def calc_filter(attrname, value):
                val_esc = ldap.filter.escape_filter_chars(value)
                return '(%s=%s)' % (attrname, val_esc)

            query = (u'(&%s%s)' %
                     (query, ''.join([calc_filter(k, v) for k, v in
                                      query_params.items()])))
        with self.get_connection() as conn:
            return conn.search_s(search_base, scope, query, attrlist)

    def get(self, object_id, ldap_filter=None):
        res = self._ldap_get(object_id, ldap_filter)
        if res is None:
            raise self._not_found(object_id)
        else:
            return self._ldap_res_to_model(res)

    def get_by_name(self, name, ldap_filter=None):
        query = (u'(%s=%s)' % (self.attribute_mapping['name'],
                               ldap.filter.escape_filter_chars(
                                   six.text_type(name))))
        res = self.get_all(query)
        try:
            return res[0]
        except IndexError:
            raise self._not_found(name)

    def get_all(self, ldap_filter=None, hints=None):
        hints = hints or driver_hints.Hints()
        return [self._ldap_res_to_model(x)
                for x in self._ldap_get_all(hints, ldap_filter)]

    def update(self, object_id, values, old_obj=None):
        """Modify an entry, computing the modlist against its current state.

        :param object_id: ID of the entity to update.
        :param values: dict of new attribute values.
        :param old_obj: current model, fetched if not supplied.
        :returns: the freshly re-read model after the modify.
        """
        if old_obj is None:
            old_obj = self.get(object_id)

        modlist = []
        for k, v in values.items():
            if k == 'id':
                # id can't be modified.
                continue

            if k in self.attribute_ignore:

                # Handle 'enabled' specially since can't disable if ignored.
                if k == 'enabled' and (not v):
                    action = _("Disabling an entity where the 'enable' "
                               "attribute is ignored by configuration.")
                    raise exception.ForbiddenAction(action=action)

                continue

            # attribute value has not changed
            if k in old_obj and old_obj[k] == v:
                continue

            if k in self.immutable_attrs:
                msg = (_("Cannot change %(option_name)s %(attr)s") %
                       {'option_name': self.options_name, 'attr': k})
                raise exception.ValidationError(msg)

            if v is None:
                # None means remove the attribute, but only if it exists.
                if old_obj.get(k) is not None:
                    modlist.append((ldap.MOD_DELETE,
                                    self.attribute_mapping.get(k, k),
                                    None))
                continue

            current_value = old_obj.get(k)
            if current_value is None:
                op = ldap.MOD_ADD
                modlist.append((op, self.attribute_mapping.get(k, k), [v]))
            elif current_value != v:
                op = ldap.MOD_REPLACE
                modlist.append((op, self.attribute_mapping.get(k, k), [v]))

        if modlist:
            with self.get_connection() as conn:
                try:
                    conn.modify_s(self._id_to_dn(object_id), modlist)
                except ldap.NO_SUCH_OBJECT:
                    raise self._not_found(object_id)

        return self.get(object_id)

    def delete(self, object_id):
        with self.get_connection() as conn:
            try:
                conn.delete_s(self._id_to_dn(object_id))
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(object_id)

    def delete_tree(self, object_id):
        """Delete an entry and all of its children.

        Tries the tree-delete server control first and falls back to a
        manual child-first deletion when the server rejects it.
        """
        tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
                                                        0,
                                                        None)
        with self.get_connection() as conn:
            try:
                conn.delete_ext_s(self._id_to_dn(object_id),
                                  serverctrls=[tree_delete_control])
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(object_id)
            except ldap.NOT_ALLOWED_ON_NONLEAF:
                # Most LDAP servers do not support the tree_delete_control.
                # In these servers, the usual idiom is to first perform a
                # search to get the entries to delete, then delete them in
                # in order of child to parent, since LDAP forbids the
                # deletion of a parent entry before deleting the children
                # of that parent.  The simplest way to do that is to delete
                # the entries in order of the length of the DN, from longest
                # to shortest DN.
                dn = self._id_to_dn(object_id)
                scope = ldap.SCOPE_SUBTREE
                # With some directory servers, an entry with objectclass
                # ldapsubentry will not be returned unless it is explicitly
                # requested, by specifying the objectclass in the search
                # filter.  We must specify this, with objectclass=*, in an
                # LDAP filter OR clause, in order to return all entries
                filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
                # We only need the DNs of the entries.  Since no attributes
                # will be returned, we do not have to specify attrsonly=1.
                entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
                if entries:
                    for dn in sorted((e[0] for e in entries),
                                     key=len, reverse=True):
                        conn.delete_s(dn)
                else:
                    LOG.debug('No entries in LDAP subtree %s', dn)

    def add_member(self, member_dn, member_list_dn):
        """Add member to the member list.

        :param member_dn: DN of member to be added.
        :param member_list_dn: DN of group to which the member will be added.

        :raises keystone.exception.Conflict: If the user was already a member.
        :raises self.NotFound: If the group entry didn't exist.
        """
        with self.get_connection() as conn:
            try:
                mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
                conn.modify_s(member_list_dn, [mod])
            except ldap.TYPE_OR_VALUE_EXISTS:
                raise exception.Conflict(_('Member %(member)s '
                                           'is already a member'
                                           ' of group %(group)s') % {
                                               'member': member_dn,
                                               'group': member_list_dn})
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(member_list_dn)

    def remove_member(self, member_dn, member_list_dn):
        """Remove member from the member list.

        :param member_dn: DN of member to be removed.
        :param member_list_dn: DN of group from which the member will be
                               removed.

        :raises self.NotFound: If the group entry didn't exist.
        :raises ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
        """
        with self.get_connection() as conn:
            try:
                mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
                conn.modify_s(member_list_dn, [mod])
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(member_list_dn)

    def _delete_tree_nodes(self, search_base, scope, query_params=None):
        # Delete every node of self.object_class under search_base matching
        # query_params; nodes that vanish concurrently are only logged.
        query = u'(objectClass=%s)' % self.object_class
        if query_params:
            # AND the extra constraints onto the objectClass filter, with
            # each value escaped so it cannot alter the filter structure.
            query = (u'(&%s%s)' %
                     (query, ''.join(['(%s=%s)'
                                      % (k, ldap.filter.escape_filter_chars(v))
                                      for k, v in
                                      query_params.items()])))
        not_deleted_nodes = []
        with self.get_connection() as conn:
            try:
                nodes = conn.search_s(search_base, scope, query,
                                      attrlist=DN_ONLY)
            except ldap.NO_SUCH_OBJECT:
                LOG.debug('Could not find entry with dn=%s', search_base)
                raise self._not_found(self._dn_to_id(search_base))
            else:
                for node_dn, _t in nodes:
                    try:
                        conn.delete_s(node_dn)
                    except ldap.NO_SUCH_OBJECT:
                        not_deleted_nodes.append(node_dn)

        if not_deleted_nodes:
            LOG.warning(_LW("When deleting entries for %(search_base)s, "
                            "could not delete nonexistent entries "
                            "%(entries)s%(dots)s"),
                        {'search_base': search_base,
                         'entries': not_deleted_nodes[:3],
                         'dots': '...' if len(not_deleted_nodes) > 3 else ''})

    def filter_query(self, hints, query=None):
        """Applies filtering to a query.

        :param hints: contains the list of filters, which may be None,
                      indicating that there are no filters to be applied.
                      If it's not None, then any filters satisfied here will
                      be removed so that the caller will know if any filters
                      remain to be applied.
        :param query: LDAP query into which to include filters

        :returns query: LDAP query, updated with any filters satisfied
        """
        def build_filter(filter_, hints):
            """Build a filter for the query.

            :param filter_: the dict that describes this filter
            :param hints: contains the list of filters yet to be satisfied.

            :returns query: LDAP query term to be added
            """
            ldap_attr = self.attribute_mapping[filter_['name']]
            val_esc = ldap.filter.escape_filter_chars(filter_['value'])

            if filter_['case_sensitive']:
                # NOTE(henry-nash): Although dependent on the schema being
                # used, most LDAP attributes are configured with case
                # insensitive matching rules, so we'll leave this to the
                # controller to filter.
                return

            if filter_['name'] == 'enabled':
                # NOTE(henry-nash): Due to the different options for storing
                # the enabled attribute (e,g, emulated or not), for now we
                # don't try and filter this at the driver level - we simply
                # leave the filter to be handled by the controller. It seems
                # unlikley that this will cause a signifcant performance
                # issue.
                return

            # TODO(henry-nash): Currently there are no booleans (other than
            # 'enabled' that is handled above) on which you can filter. If
            # there were, we would need to add special handling here to
            # convert the booleans values to 'TRUE' and 'FALSE'. To do that
            # we would also need to know which filter keys were actually
            # booleans (this is related to bug #1411478).
            # Map the abstract comparator onto the equivalent LDAP wildcard
            # pattern for this attribute.
            if filter_['comparator'] == 'equals':
                query_term = (u'(%(attr)s=%(val)s)'
                              % {'attr': ldap_attr, 'val': val_esc})
            elif filter_['comparator'] == 'contains':
                query_term = (u'(%(attr)s=*%(val)s*)'
                              % {'attr': ldap_attr, 'val': val_esc})
            elif filter_['comparator'] == 'startswith':
                query_term = (u'(%(attr)s=%(val)s*)'
                              % {'attr': ldap_attr, 'val': val_esc})
            elif filter_['comparator'] == 'endswith':
                query_term = (u'(%(attr)s=*%(val)s)'
                              % {'attr': ldap_attr, 'val': val_esc})
            else:
                # It's a filter we don't understand, so let the caller
                # work out if they need to do something with it.
                return

            return query_term

        if query is None:
            # make sure query is a string so the ldap filter is properly
            # constructed from filter_list later
            query = ''

        if hints is None:
            return query

        filter_list = []
        satisfied_filters = []

        for filter_ in hints.filters:
            if filter_['name'] not in self.attribute_mapping:
                continue
            new_filter = build_filter(filter_, hints)
            if new_filter is not None:
                filter_list.append(new_filter)
                satisfied_filters.append(filter_)

        if filter_list:
            query = u'(&%s%s)' % (query, ''.join(filter_list))

        # Remove satisfied filters, then the caller will know remaining
        # filters
        for filter_ in satisfied_filters:
            hints.filters.remove(filter_)
        return query


class EnabledEmuMixIn(BaseLdap):
    """Emulates boolean 'enabled' attribute if turned on.

    Creates a group holding all enabled objects of this class, all missing
    objects are considered disabled.

    Options:

    * $name_enabled_emulation - boolean, on/off
    * $name_enabled_emulation_dn - DN of that group, default is
      cn=enabled_${name}s,${tree_dn}
    * $name_enabled_emulation_use_group_config - boolean, on/off

    Where ${name}s is the plural of self.options_name ('users' or 'tenants'),
    ${tree_dn} is self.tree_dn.
    """

    DEFAULT_GROUP_OBJECTCLASS = 'groupOfNames'
    DEFAULT_MEMBER_ATTRIBUTE = 'member'

    def __init__(self, conf):
        super(EnabledEmuMixIn, self).__init__(conf)
        # Resolve the per-object-type emulation options from config, e.g.
        # user_enabled_emulation / tenant_enabled_emulation.
        enabled_emulation = '%s_enabled_emulation' % self.options_name
        self.enabled_emulation = getattr(conf.ldap, enabled_emulation)

        enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
        self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)

        use_group_config = ('%s_enabled_emulation_use_group_config' %
                            self.options_name)
        self.use_group_config = getattr(conf.ldap, use_group_config)

        if not self.use_group_config:
            self.member_attribute = self.DEFAULT_MEMBER_ATTRIBUTE
            self.group_objectclass = self.DEFAULT_GROUP_OBJECTCLASS
        else:
            self.member_attribute = conf.ldap.group_member_attribute
            self.group_objectclass = conf.ldap.group_objectclass

        if not self.enabled_emulation_dn:
            # No DN configured: synthesize cn=enabled_<name>s,<tree_dn>.
            naming_attr_name = 'cn'
            naming_attr_value = 'enabled_%ss' % self.options_name
            sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
            self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
            naming_attr = (naming_attr_name, [naming_attr_value])
        else:
            # Extract the attribute name and value from the configured DN.
            naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
            naming_rdn = naming_dn[0][0]
            naming_attr = (utf8_decode(naming_rdn[0]),
                           utf8_decode(naming_rdn[1]))
        # Saved so _add_enabled() can create the group on demand.
        self.enabled_emulation_naming_attr = naming_attr

    def _get_enabled(self, object_id, conn):
        # An object is "enabled" iff its DN is a member of the emulation
        # group; a missing group means everything is disabled.
        dn = self._id_to_dn(object_id)
        query = '(%s=%s)' % (self.member_attribute,
                             ldap.filter.escape_filter_chars(dn))
        try:
            enabled_value = conn.search_s(self.enabled_emulation_dn,
                                          ldap.SCOPE_BASE,
                                          query, attrlist=DN_ONLY)
        except ldap.NO_SUCH_OBJECT:
            return False
        else:
            return bool(enabled_value)

    def _add_enabled(self, object_id):
        # Add the object's DN to the emulation group (idempotent).
        with self.get_connection() as conn:
            if not self._get_enabled(object_id, conn):
                modlist = [(ldap.MOD_ADD,
                            self.member_attribute,
                            [self._id_to_dn(object_id)])]
                try:
                    conn.modify_s(self.enabled_emulation_dn, modlist)
                except ldap.NO_SUCH_OBJECT:
                    # The emulation group doesn't exist yet - create it
                    # with this object as its first member.
                    attr_list = [('objectClass', [self.group_objectclass]),
                                 (self.member_attribute,
                                  [self._id_to_dn(object_id)]),
                                 self.enabled_emulation_naming_attr]
                    if self.use_dumb_member:
                        attr_list[1][1].append(self.dumb_member)
                    conn.add_s(self.enabled_emulation_dn, attr_list)

    def _remove_enabled(self, object_id):
        # Remove the object's DN from the emulation group, tolerating the
        # group or the membership already being absent.
        modlist = [(ldap.MOD_DELETE,
                    self.member_attribute,
                    [self._id_to_dn(object_id)])]
        with self.get_connection() as conn:
            try:
                conn.modify_s(self.enabled_emulation_dn, modlist)
            except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):  # nosec
                # It's already gone, good.
                pass

    def create(self, values):
        if self.enabled_emulation:
            # 'enabled' is not stored on the entry itself; strip it before
            # the LDAP add and reflect it via group membership instead.
            enabled_value = values.pop('enabled', True)
            ref = super(EnabledEmuMixIn, self).create(values)
            if 'enabled' not in self.attribute_ignore:
                if enabled_value:
                    self._add_enabled(ref['id'])
                ref['enabled'] = enabled_value
            return ref
        else:
            return super(EnabledEmuMixIn, self).create(values)

    def get(self, object_id, ldap_filter=None):
        with self.get_connection() as conn:
            ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
            if ('enabled' not in self.attribute_ignore and
                    self.enabled_emulation):
                ref['enabled'] = self._get_enabled(object_id, conn)
            return ref

    def get_all(self, ldap_filter=None, hints=None):
        hints = hints or driver_hints.Hints()
        if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
            # had to copy BaseLdap.get_all here to ldap_filter by DN
            tenant_list = [self._ldap_res_to_model(x)
                           for x in self._ldap_get_all(hints, ldap_filter)
                           if x[0] != self.enabled_emulation_dn]
            with self.get_connection() as conn:
                for tenant_ref in tenant_list:
                    tenant_ref['enabled'] = self._get_enabled(
                        tenant_ref['id'], conn)
            return tenant_list
        else:
            return super(EnabledEmuMixIn, self).get_all(ldap_filter, hints)

    def update(self, object_id, values, old_obj=None):
        if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
            # Handle 'enabled' out-of-band: update the entry without it,
            # then adjust the emulation group membership.
            data = values.copy()
            enabled_value = data.pop('enabled', None)
            ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
            if enabled_value is not None:
                if enabled_value:
                    self._add_enabled(object_id)
                else:
                    self._remove_enabled(object_id)
                ref['enabled'] = enabled_value
            return ref
        else:
            return super(EnabledEmuMixIn, self).update(
                object_id, values, old_obj)

    def delete(self, object_id):
        if self.enabled_emulation:
            self._remove_enabled(object_id)
        super(EnabledEmuMixIn, self).delete(object_id)
keystone-9.0.0/keystone/middleware/0000775000567000056710000000000012701407246020503 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/middleware/auth.py0000664000567000056710000002213712701407102022012 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_context import context as oslo_context
from oslo_log import log
from oslo_log import versionutils

from keystone.common import authorization
from keystone.common import tokenless_auth
from keystone.common import wsgi
from keystone import exception
from keystone.federation import constants as federation_constants
from keystone.federation import utils
from keystone.i18n import _, _LI, _LW
from keystone.middleware import core
from keystone.models import token_model
from keystone.token.providers import common

CONF = cfg.CONF
LOG = log.getLogger(__name__)

__all__ = ('AuthContextMiddleware',)


class AuthContextMiddleware(wsgi.Middleware):
    """Build the authentication context from the request auth token."""

    def _build_auth_context(self, request):
        # Returns (auth_context, token_id, is_admin); auth_context is None
        # when no usable auth information was presented.

        # NOTE(gyee): token takes precedence over SSL client certificates.
        # This will preserve backward compatibility with the existing
        # behavior. Tokenless authorization with X.509 SSL client
        # certificate is effectively disabled if no trusted issuers are
        # provided.
        token_id = None
        if core.AUTH_TOKEN_HEADER in request.headers:
            token_id = request.headers[core.AUTH_TOKEN_HEADER].strip()

        is_admin = request.environ.get(core.CONTEXT_ENV, {}).get('is_admin',
                                                                 False)
        if is_admin:
            # NOTE(gyee): no need to proceed any further as we already know
            # this is an admin request.
            auth_context = {}
            return auth_context, token_id, is_admin

        if token_id:
            # In this case the client sent in a token.
            auth_context, is_admin = self._build_token_auth_context(
                request, token_id)
            return auth_context, token_id, is_admin

        # No token, maybe the client presented an X.509 certificate.
        if self._validate_trusted_issuer(request.environ):
            auth_context = self._build_tokenless_auth_context(
                request.environ)
            return auth_context, None, False

        LOG.debug('There is either no auth token in the request or '
                  'the certificate issuer is not trusted. No auth '
                  'context will be set.')

        return None, None, False

    def _build_token_auth_context(self, request, token_id):
        # Validate the token and convert it into an auth context dict;
        # returns (auth_context, is_admin).
        if CONF.admin_token and token_id == CONF.admin_token:
            versionutils.report_deprecated_feature(
                LOG,
                _LW('build_auth_context middleware checking for the admin '
                    'token is deprecated as of the Mitaka release and will be '
                    'removed in the O release. If your deployment requires '
                    'use of the admin token, update keystone-paste.ini so '
                    'that admin_token_auth is before build_auth_context in '
                    'the paste pipelines, otherwise remove the '
                    'admin_token_auth middleware from the paste pipelines.'))
            return {}, True

        context = {'token_id': token_id}
        context['environment'] = request.environ

        try:
            token_ref = token_model.KeystoneToken(
                token_id=token_id,
                token_data=self.token_provider_api.validate_token(token_id))
            # TODO(gyee): validate_token_bind should really be its own
            # middleware
            wsgi.validate_token_bind(context, token_ref)
            return authorization.token_to_auth_context(token_ref), False
        except exception.TokenNotFound:
            LOG.warning(_LW('RBAC: Invalid token'))
            raise exception.Unauthorized()

    def _build_tokenless_auth_context(self, env):
        """Build the authentication context.

        The context is built from the attributes provided in the env,
        such as certificate and scope attributes.
        """
        tokenless_helper = tokenless_auth.TokenlessAuthHelper(env)

        (domain_id, project_id, trust_ref, unscoped) = (
            tokenless_helper.get_scope())
        user_ref = tokenless_helper.get_mapped_user(
            project_id,
            domain_id)

        # NOTE(gyee): if it is an ephemeral user, the
        # given X.509 SSL client cert does not need to map to
        # an existing user.
        if user_ref['type'] == utils.UserType.EPHEMERAL:
            auth_context = {}
            auth_context['group_ids'] = user_ref['group_ids']
            auth_context[federation_constants.IDENTITY_PROVIDER] = (
                user_ref[federation_constants.IDENTITY_PROVIDER])
            auth_context[federation_constants.PROTOCOL] = (
                user_ref[federation_constants.PROTOCOL])
            if domain_id and project_id:
                msg = _('Scoping to both domain and project is not allowed')
                raise ValueError(msg)
            if domain_id:
                auth_context['domain_id'] = domain_id
            if project_id:
                auth_context['project_id'] = project_id
            auth_context['roles'] = user_ref['roles']
        else:
            # it's the local user, so token data is needed.
            token_helper = common.V3TokenDataHelper()
            token_data = token_helper.get_token_data(
                user_id=user_ref['id'],
                method_names=[CONF.tokenless_auth.protocol],
                domain_id=domain_id,
                project_id=project_id)
            auth_context = {'user_id': user_ref['id']}
            auth_context['is_delegated_auth'] = False
            if domain_id:
                auth_context['domain_id'] = domain_id
            if project_id:
                auth_context['project_id'] = project_id
            auth_context['roles'] = [role['name'] for role
                                     in token_data['token']['roles']]
        return auth_context

    def _validate_trusted_issuer(self, env):
        """To further filter the certificates that are trusted.

        If the config option 'trusted_issuer' is absent or does
        not contain the trusted issuer DN, no certificates
        will be allowed in tokenless authorization.

        :param env: The env contains the client issuer's attributes
        :type env: dict
        :returns: True if client_issuer is trusted; otherwise False
        """
        if not CONF.tokenless_auth.trusted_issuer:
            return False

        client_issuer = env.get(CONF.tokenless_auth.issuer_attribute)
        if not client_issuer:
            msg = _LI('Cannot find client issuer in env by the '
                      'issuer attribute - %s.')
            LOG.info(msg, CONF.tokenless_auth.issuer_attribute)
            return False

        if client_issuer in CONF.tokenless_auth.trusted_issuer:
            return True

        msg = _LI('The client issuer %(client_issuer)s does not match with '
                  'the trusted issuer %(trusted_issuer)s')
        LOG.info(
            msg, {'client_issuer': client_issuer,
                  'trusted_issuer': CONF.tokenless_auth.trusted_issuer})

        return False

    def process_request(self, request):
        # The request context stores itself in thread-local memory for
        # logging.
        request_context = oslo_context.RequestContext(
            request_id=request.environ.get('openstack.request_id'))

        if authorization.AUTH_CONTEXT_ENV in request.environ:
            msg = _LW('Auth context already exists in the request '
                      'environment; it will be used for authorization '
                      'instead of creating a new one.')
            LOG.warning(msg)
            return

        auth_context, token_id, is_admin = self._build_auth_context(request)

        request_context.auth_token = token_id
        request_context.is_admin = is_admin

        if auth_context is None:
            # The client didn't send any auth info, so don't set auth
            # context.
            return

        # The attributes of request_context are put into the logs. This is a
        # common pattern for all the OpenStack services. In all the other
        # projects these are IDs, so set the attributes to IDs here rather
        # than the name.
        request_context.user = auth_context.get('user_id')
        request_context.tenant = auth_context.get('project_id')
        request_context.domain = auth_context.get('domain_id')
        request_context.user_domain = auth_context.get('user_domain_id')
        request_context.project_domain = auth_context.get('project_domain_id')
        request_context.update_store()

        LOG.debug('RBAC: auth_context: %s', auth_context)
        request.environ[authorization.AUTH_CONTEXT_ENV] = auth_context
keystone-9.0.0/keystone/middleware/__init__.py0000664000567000056710000000125012701407102022601 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.middleware.auth import *  # noqa
from keystone.middleware.core import *  # noqa
keystone-9.0.0/keystone/middleware/core.py0000664000567000056710000001145612701407102022003 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils

from keystone.common import wsgi
from keystone import exception
from keystone.i18n import _LW


CONF = cfg.CONF
LOG = log.getLogger(__name__)


# Header used to transmit the auth token
AUTH_TOKEN_HEADER = 'X-Auth-Token'

# Header used to transmit the subject token
SUBJECT_TOKEN_HEADER = 'X-Subject-Token'

# Environment variable used to pass the request context
CONTEXT_ENV = wsgi.CONTEXT_ENV

# Environment variable used to pass the request params
PARAMS_ENV = wsgi.PARAMS_ENV


class TokenAuthMiddleware(wsgi.Middleware):
    """Copy the auth/subject token headers into the request context."""

    def process_request(self, request):
        token = request.headers.get(AUTH_TOKEN_HEADER)
        context = request.environ.get(CONTEXT_ENV, {})
        context['token_id'] = token
        if SUBJECT_TOKEN_HEADER in request.headers:
            context['subject_token_id'] = request.headers[SUBJECT_TOKEN_HEADER]
        request.environ[CONTEXT_ENV] = context


class AdminTokenAuthMiddleware(wsgi.Middleware):
    """A trivial filter that checks for a pre-defined admin token.

    Sets 'is_admin' to true in the context, expected to be checked by
    methods that are admin-only.
    """

    def __init__(self, application):
        super(AdminTokenAuthMiddleware, self).__init__(application)
        LOG.warning(_LW("The admin_token_auth middleware presents a security "
                        "risk and should be removed from the "
                        "[pipeline:api_v3], [pipeline:admin_api], and "
                        "[pipeline:public_api] sections of your paste ini "
                        "file."))

    def process_request(self, request):
        token = request.headers.get(AUTH_TOKEN_HEADER)
        context = request.environ.get(CONTEXT_ENV, {})
        context['is_admin'] = CONF.admin_token and (token == CONF.admin_token)
        request.environ[CONTEXT_ENV] = context


class JsonBodyMiddleware(wsgi.Middleware):
    """Middleware to allow method arguments to be passed as serialized JSON.

    Accepting arguments as JSON is useful for accepting data that may be
    more complex than simple primitives. Filters out the parameters `self`,
    `context` and anything beginning with an underscore.
    """

    def process_request(self, request):
        # Abort early if we don't have any work to do
        params_json = request.body
        if not params_json:
            return

        # Reject unrecognized content types. Empty string indicates
        # the client did not explicitly set the header
        if request.content_type not in ('application/json', ''):
            e = exception.ValidationError(attribute='application/json',
                                          target='Content-Type header')
            return wsgi.render_exception(e, request=request)

        params_parsed = {}
        try:
            params_parsed = jsonutils.loads(params_json)
        except ValueError:
            e = exception.ValidationError(attribute='valid JSON',
                                          target='request body')
            return wsgi.render_exception(e, request=request)
        finally:
            # Normalize falsy JSON bodies (e.g. "null", "[]") to an empty
            # dict so the isinstance check below accepts them.
            if not params_parsed:
                params_parsed = {}

        if not isinstance(params_parsed, dict):
            e = exception.ValidationError(attribute='valid JSON object',
                                          target='request body')
            return wsgi.render_exception(e, request=request)

        params = {}
        for k, v in params_parsed.items():
            if k in ('self', 'context'):
                continue
            if k.startswith('_'):
                continue
            params[k] = v

        request.environ[PARAMS_ENV] = params


class NormalizingFilter(wsgi.Middleware):
    """Middleware filter to handle URL normalization."""

    def process_request(self, request):
        """Normalizes URLs."""
        # Removes a trailing slash from the given path, if any.
        if (len(request.environ['PATH_INFO']) > 1 and
                request.environ['PATH_INFO'][-1] == '/'):
            request.environ['PATH_INFO'] = request.environ['PATH_INFO'][:-1]
        # Rewrites path to root if no path is given.
        elif not request.environ['PATH_INFO']:
            request.environ['PATH_INFO'] = '/'
keystone-9.0.0/keystone/endpoint_policy/0000775000567000056710000000000012701407246021565 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/endpoint_policy/backends/0000775000567000056710000000000012701407246023337 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/endpoint_policy/backends/__init__.py0000664000567000056710000000000012701407102025435 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/endpoint_policy/backends/sql.py0000664000567000056710000001373512701407102024510 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

import sqlalchemy

from keystone.common import sql
from keystone import exception


class PolicyAssociation(sql.ModelBase, sql.ModelDictMixin):
    __tablename__ = 'policy_association'
    attributes = ['policy_id', 'endpoint_id', 'region_id', 'service_id']
    # The id column is never exposed outside this module. It only exists to
    # provide a primary key, given that the real columns we would like to use
    # (endpoint_id, service_id, region_id) can be null
    id = sql.Column(sql.String(64), primary_key=True)
    policy_id = sql.Column(sql.String(64), nullable=False)
    endpoint_id = sql.Column(sql.String(64), nullable=True)
    service_id = sql.Column(sql.String(64), nullable=True)
    region_id = sql.Column(sql.String(64), nullable=True)
    __table_args__ = (sql.UniqueConstraint('endpoint_id', 'service_id',
                                           'region_id'),)

    def to_dict(self):
        """Returns the model's attributes as a dictionary.

        We override the standard method in order to hide the id column,
        since this only exists to provide the table with a primary key.
        """
        d = {}
        for attr in self.__class__.attributes:
            d[attr] = getattr(self, attr)
        return d


class EndpointPolicy(object):

    def create_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        # Upsert: one row per (endpoint_id, service_id, region_id) triple,
        # enforced by the table's unique constraint.
        with sql.session_for_write() as session:
            try:
                # See if there is already a row for this association, and if
                # so, update it with the new policy_id
                query = session.query(PolicyAssociation)
                query = query.filter_by(endpoint_id=endpoint_id)
                query = query.filter_by(service_id=service_id)
                query = query.filter_by(region_id=region_id)
                association = query.one()
                association.policy_id = policy_id
            except sql.NotFound:
                association = PolicyAssociation(id=uuid.uuid4().hex,
                                                policy_id=policy_id,
                                                endpoint_id=endpoint_id,
                                                service_id=service_id,
                                                region_id=region_id)
                session.add(association)

    def check_policy_association(self, policy_id, endpoint_id=None,
                                 service_id=None, region_id=None):
        # Raises PolicyAssociationNotFound unless a matching row exists.
        sql_constraints = sqlalchemy.and_(
            PolicyAssociation.policy_id == policy_id,
            PolicyAssociation.endpoint_id == endpoint_id,
            PolicyAssociation.service_id == service_id,
            PolicyAssociation.region_id == region_id)

        # NOTE(henry-nash): Getting a single value to save object
        # management overhead.
        with sql.session_for_read() as session:
            if session.query(PolicyAssociation.id).filter(
                    sql_constraints).distinct().count() == 0:
                raise exception.PolicyAssociationNotFound()

    def delete_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        with sql.session_for_write() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(policy_id=policy_id)
            query = query.filter_by(endpoint_id=endpoint_id)
            query = query.filter_by(service_id=service_id)
            query = query.filter_by(region_id=region_id)
            query.delete()

    def get_policy_association(self, endpoint_id=None,
                               service_id=None, region_id=None):
        # Return {'policy_id': ...} for the association matching the given
        # target, or raise PolicyAssociationNotFound.
        sql_constraints = sqlalchemy.and_(
            PolicyAssociation.endpoint_id == endpoint_id,
            PolicyAssociation.service_id == service_id,
            PolicyAssociation.region_id == region_id)

        try:
            with sql.session_for_read() as session:
                policy_id = session.query(PolicyAssociation.policy_id).filter(
                    sql_constraints).distinct().one()
                return {'policy_id': policy_id}
        except sql.NotFound:
            raise exception.PolicyAssociationNotFound()

    def list_associations_for_policy(self, policy_id):
        with sql.session_for_read() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(policy_id=policy_id)
            return [ref.to_dict() for ref in query.all()]

    def delete_association_by_endpoint(self, endpoint_id):
        with sql.session_for_write() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(endpoint_id=endpoint_id)
            query.delete()

    def delete_association_by_service(self, service_id):
        with sql.session_for_write() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(service_id=service_id)
            query.delete()

    def delete_association_by_region(self, region_id):
        with sql.session_for_write() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(region_id=region_id)
            query.delete()

    def delete_association_by_policy(self, policy_id):
        with sql.session_for_write() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(policy_id=policy_id)
            query.delete()
keystone-9.0.0/keystone/endpoint_policy/__init__.py0000664000567000056710000000112612701407102023665 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from keystone.endpoint_policy.core import *  # noqa
keystone-9.0.0/keystone/endpoint_policy/core.py0000664000567000056710000004222112701407102023067 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

from oslo_config import cfg
from oslo_log import log
import six

from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone.i18n import _, _LE, _LW


CONF = cfg.CONF
LOG = log.getLogger(__name__)


@dependency.provider('endpoint_policy_api')
@dependency.requires('catalog_api', 'policy_api')
class Manager(manager.Manager):
    """Default pivot point for the Endpoint Policy backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.
    """

    driver_namespace = 'keystone.endpoint_policy'

    def __init__(self):
        super(Manager, self).__init__(CONF.endpoint_policy.driver)

    def _assert_valid_association(self, endpoint_id, service_id, region_id):
        """Assert that the association is supported.

        There are three types of association supported:

        - Endpoint (in which case service and region must be None)
        - Service and region (in which endpoint must be None)
        - Service (in which case endpoint and region must be None)
        """
        if (endpoint_id is not None and
                service_id is None and region_id is None):
            return
        if (service_id is not None and region_id is not None and
                endpoint_id is None):
            return
        if (service_id is not None and
                endpoint_id is None and region_id is None):
            return
        raise exception.InvalidPolicyAssociation(endpoint_id=endpoint_id,
                                                 service_id=service_id,
                                                 region_id=region_id)

    def create_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        self._assert_valid_association(endpoint_id, service_id, region_id)
        self.driver.create_policy_association(policy_id, endpoint_id,
                                              service_id, region_id)

    def check_policy_association(self, policy_id, endpoint_id=None,
                                 service_id=None, region_id=None):
        self._assert_valid_association(endpoint_id, service_id, region_id)
        self.driver.check_policy_association(policy_id, endpoint_id,
                                             service_id, region_id)

    def delete_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        self._assert_valid_association(endpoint_id, service_id, region_id)
        self.driver.delete_policy_association(policy_id, endpoint_id,
                                              service_id, region_id)

    def list_endpoints_for_policy(self, policy_id):

        def _get_endpoint(endpoint_id, policy_id):
            try:
                return self.catalog_api.get_endpoint(endpoint_id)
            except exception.EndpointNotFound:
                msg = _LW('Endpoint %(endpoint_id)s referenced in '
                          'association for policy %(policy_id)s not found.')
                LOG.warning(msg, {'policy_id':
policy_id, 'endpoint_id': endpoint_id}) raise def _get_endpoints_for_service(service_id, endpoints): # TODO(henry-nash): Consider optimizing this in the future by # adding an explicit list_endpoints_for_service to the catalog API. return [ep for ep in endpoints if ep['service_id'] == service_id] def _get_endpoints_for_service_and_region( service_id, region_id, endpoints, regions): # TODO(henry-nash): Consider optimizing this in the future. # The lack of a two-way pointer in the region tree structure # makes this somewhat inefficient. def _recursively_get_endpoints_for_region( region_id, service_id, endpoint_list, region_list, endpoints_found, regions_examined): """Recursively search down a region tree for endpoints. :param region_id: the point in the tree to examine :param service_id: the service we are interested in :param endpoint_list: list of all endpoints :param region_list: list of all regions :param endpoints_found: list of matching endpoints found so far - which will be updated if more are found in this iteration :param regions_examined: list of regions we have already looked at - used to spot illegal circular references in the tree to avoid never completing search :returns: list of endpoints that match """ if region_id in regions_examined: msg = _LE('Circular reference or a repeated entry found ' 'in region tree - %(region_id)s.') LOG.error(msg, {'region_id': ref.region_id}) return regions_examined.append(region_id) endpoints_found += ( [ep for ep in endpoint_list if ep['service_id'] == service_id and ep['region_id'] == region_id]) for region in region_list: if region['parent_region_id'] == region_id: _recursively_get_endpoints_for_region( region['id'], service_id, endpoints, regions, endpoints_found, regions_examined) endpoints_found = [] regions_examined = [] # Now walk down the region tree _recursively_get_endpoints_for_region( region_id, service_id, endpoints, regions, endpoints_found, regions_examined) return endpoints_found matching_endpoints = [] 
endpoints = self.catalog_api.list_endpoints() regions = self.catalog_api.list_regions() for ref in self.list_associations_for_policy(policy_id): if ref.get('endpoint_id') is not None: matching_endpoints.append( _get_endpoint(ref['endpoint_id'], policy_id)) continue if (ref.get('service_id') is not None and ref.get('region_id') is None): matching_endpoints += _get_endpoints_for_service( ref['service_id'], endpoints) continue if (ref.get('service_id') is not None and ref.get('region_id') is not None): matching_endpoints += ( _get_endpoints_for_service_and_region( ref['service_id'], ref['region_id'], endpoints, regions)) continue msg = _LW('Unsupported policy association found - ' 'Policy %(policy_id)s, Endpoint %(endpoint_id)s, ' 'Service %(service_id)s, Region %(region_id)s, ') LOG.warning(msg, {'policy_id': policy_id, 'endpoint_id': ref['endpoint_id'], 'service_id': ref['service_id'], 'region_id': ref['region_id']}) return matching_endpoints def get_policy_for_endpoint(self, endpoint_id): def _get_policy(policy_id, endpoint_id): try: return self.policy_api.get_policy(policy_id) except exception.PolicyNotFound: msg = _LW('Policy %(policy_id)s referenced in association ' 'for endpoint %(endpoint_id)s not found.') LOG.warning(msg, {'policy_id': policy_id, 'endpoint_id': endpoint_id}) raise def _look_for_policy_for_region_and_service(endpoint): """Look in the region and its parents for a policy. Examine the region of the endpoint for a policy appropriate for the service of the endpoint. If there isn't a match, then chase up the region tree to find one. """ region_id = endpoint['region_id'] regions_examined = [] while region_id is not None: try: ref = self.get_policy_association( service_id=endpoint['service_id'], region_id=region_id) return ref['policy_id'] except exception.PolicyAssociationNotFound: # nosec # There wasn't one for that region & service, handle below. 
pass # There wasn't one for that region & service, let's # chase up the region tree regions_examined.append(region_id) region = self.catalog_api.get_region(region_id) region_id = None if region.get('parent_region_id') is not None: region_id = region['parent_region_id'] if region_id in regions_examined: msg = _LE('Circular reference or a repeated entry ' 'found in region tree - %(region_id)s.') LOG.error(msg, {'region_id': region_id}) break # First let's see if there is a policy explicitly defined for # this endpoint. try: ref = self.get_policy_association(endpoint_id=endpoint_id) return _get_policy(ref['policy_id'], endpoint_id) except exception.PolicyAssociationNotFound: # nosec # There wasn't a policy explicitly defined for this endpoint, # handled below. pass # There wasn't a policy explicitly defined for this endpoint, so # now let's see if there is one for the Region & Service. endpoint = self.catalog_api.get_endpoint(endpoint_id) policy_id = _look_for_policy_for_region_and_service(endpoint) if policy_id is not None: return _get_policy(policy_id, endpoint_id) # Finally, just check if there is one for the service. try: ref = self.get_policy_association( service_id=endpoint['service_id']) return _get_policy(ref['policy_id'], endpoint_id) except exception.PolicyAssociationNotFound: # nosec # No policy is associated with endpoint, handled below. pass msg = _('No policy is associated with endpoint ' '%(endpoint_id)s.') % {'endpoint_id': endpoint_id} raise exception.NotFound(msg) @six.add_metaclass(abc.ABCMeta) class EndpointPolicyDriverV8(object): """Interface description for an Endpoint Policy driver.""" @abc.abstractmethod def create_policy_association(self, policy_id, endpoint_id=None, service_id=None, region_id=None): """Creates a policy association. 
:param policy_id: identity of policy that is being associated :type policy_id: string :param endpoint_id: identity of endpoint to associate :type endpoint_id: string :param service_id: identity of the service to associate :type service_id: string :param region_id: identity of the region to associate :type region_id: string :returns: None There are three types of association permitted: - Endpoint (in which case service and region must be None) - Service and region (in which endpoint must be None) - Service (in which case endpoint and region must be None) """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def check_policy_association(self, policy_id, endpoint_id=None, service_id=None, region_id=None): """Checks existence a policy association. :param policy_id: identity of policy that is being associated :type policy_id: string :param endpoint_id: identity of endpoint to associate :type endpoint_id: string :param service_id: identity of the service to associate :type service_id: string :param region_id: identity of the region to associate :type region_id: string :raises keystone.exception.PolicyAssociationNotFound: If there is no match for the specified association. :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_policy_association(self, policy_id, endpoint_id=None, service_id=None, region_id=None): """Deletes a policy association. :param policy_id: identity of policy that is being associated :type policy_id: string :param endpoint_id: identity of endpoint to associate :type endpoint_id: string :param service_id: identity of the service to associate :type service_id: string :param region_id: identity of the region to associate :type region_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_policy_association(self, endpoint_id=None, service_id=None, region_id=None): """Gets the policy for an explicit association. 
This method is not exposed as a public API, but is used by get_policy_for_endpoint(). :param endpoint_id: identity of endpoint :type endpoint_id: string :param service_id: identity of the service :type service_id: string :param region_id: identity of the region :type region_id: string :raises keystone.exception.PolicyAssociationNotFound: If there is no match for the specified association. :returns: dict containing policy_id """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_associations_for_policy(self, policy_id): """List the associations for a policy. This method is not exposed as a public API, but is used by list_endpoints_for_policy(). :param policy_id: identity of policy :type policy_id: string :returns: List of association dicts """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_endpoints_for_policy(self, policy_id): """List all the endpoints using a given policy. :param policy_id: identity of policy that is being associated :type policy_id: string :returns: list of endpoints that have an effective association with that policy """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_policy_for_endpoint(self, endpoint_id): """Get the appropriate policy for a given endpoint. :param endpoint_id: identity of endpoint :type endpoint_id: string :returns: Policy entity for the endpoint """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_association_by_endpoint(self, endpoint_id): """Removes all the policy associations with the specific endpoint. :param endpoint_id: identity of endpoint to check :type endpoint_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_association_by_service(self, service_id): """Removes all the policy associations with the specific service. 
:param service_id: identity of endpoint to check :type service_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_association_by_region(self, region_id): """Removes all the policy associations with the specific region. :param region_id: identity of endpoint to check :type region_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_association_by_policy(self, policy_id): """Removes all the policy associations with the specific policy. :param policy_id: identity of endpoint to check :type policy_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover Driver = manager.create_legacy_driver(EndpointPolicyDriverV8) keystone-9.0.0/keystone/endpoint_policy/controllers.py0000664000567000056710000001653612701407102024507 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import controller from keystone.common import dependency from keystone import notifications @dependency.requires('policy_api', 'catalog_api', 'endpoint_policy_api') class EndpointPolicyV3Controller(controller.V3Controller): collection_name = 'endpoints' member_name = 'endpoint' def __init__(self): super(EndpointPolicyV3Controller, self).__init__() notifications.register_event_callback( 'deleted', 'endpoint', self._on_endpoint_delete) notifications.register_event_callback( 'deleted', 'service', self._on_service_delete) notifications.register_event_callback( 'deleted', 'region', self._on_region_delete) notifications.register_event_callback( 'deleted', 'policy', self._on_policy_delete) def _on_endpoint_delete(self, service, resource_type, operation, payload): self.endpoint_policy_api.delete_association_by_endpoint( payload['resource_info']) def _on_service_delete(self, service, resource_type, operation, payload): self.endpoint_policy_api.delete_association_by_service( payload['resource_info']) def _on_region_delete(self, service, resource_type, operation, payload): self.endpoint_policy_api.delete_association_by_region( payload['resource_info']) def _on_policy_delete(self, service, resource_type, operation, payload): self.endpoint_policy_api.delete_association_by_policy( payload['resource_info']) @controller.protected() def create_policy_association_for_endpoint(self, context, policy_id, endpoint_id): """Create an association between a policy and an endpoint.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_endpoint(endpoint_id) self.endpoint_policy_api.create_policy_association( policy_id, endpoint_id=endpoint_id) @controller.protected() def check_policy_association_for_endpoint(self, context, policy_id, endpoint_id): """Check an association between a policy and an endpoint.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_endpoint(endpoint_id) self.endpoint_policy_api.check_policy_association( policy_id, 
endpoint_id=endpoint_id) @controller.protected() def delete_policy_association_for_endpoint(self, context, policy_id, endpoint_id): """Delete an association between a policy and an endpoint.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_endpoint(endpoint_id) self.endpoint_policy_api.delete_policy_association( policy_id, endpoint_id=endpoint_id) @controller.protected() def create_policy_association_for_service(self, context, policy_id, service_id): """Create an association between a policy and a service.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_service(service_id) self.endpoint_policy_api.create_policy_association( policy_id, service_id=service_id) @controller.protected() def check_policy_association_for_service(self, context, policy_id, service_id): """Check an association between a policy and a service.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_service(service_id) self.endpoint_policy_api.check_policy_association( policy_id, service_id=service_id) @controller.protected() def delete_policy_association_for_service(self, context, policy_id, service_id): """Delete an association between a policy and a service.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_service(service_id) self.endpoint_policy_api.delete_policy_association( policy_id, service_id=service_id) @controller.protected() def create_policy_association_for_region_and_service( self, context, policy_id, service_id, region_id): """Create an association between a policy and region+service.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_service(service_id) self.catalog_api.get_region(region_id) self.endpoint_policy_api.create_policy_association( policy_id, service_id=service_id, region_id=region_id) @controller.protected() def check_policy_association_for_region_and_service( self, context, policy_id, service_id, region_id): """Check an association between a policy and region+service.""" self.policy_api.get_policy(policy_id) 
self.catalog_api.get_service(service_id) self.catalog_api.get_region(region_id) self.endpoint_policy_api.check_policy_association( policy_id, service_id=service_id, region_id=region_id) @controller.protected() def delete_policy_association_for_region_and_service( self, context, policy_id, service_id, region_id): """Delete an association between a policy and region+service.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_service(service_id) self.catalog_api.get_region(region_id) self.endpoint_policy_api.delete_policy_association( policy_id, service_id=service_id, region_id=region_id) @controller.protected() def get_policy_for_endpoint(self, context, endpoint_id): """Get the effective policy for an endpoint.""" self.catalog_api.get_endpoint(endpoint_id) ref = self.endpoint_policy_api.get_policy_for_endpoint(endpoint_id) # NOTE(henry-nash): since the collection and member for this class is # set to endpoints, we have to handle wrapping this policy entity # ourselves. self._add_self_referential_link(context, ref) return {'policy': ref} # NOTE(henry-nash): As in the catalog controller, we must ensure that the # legacy_endpoint_id does not escape. @classmethod def filter_endpoint(cls, ref): if 'legacy_endpoint_id' in ref: ref.pop('legacy_endpoint_id') return ref @classmethod def wrap_member(cls, context, ref): ref = cls.filter_endpoint(ref) return super(EndpointPolicyV3Controller, cls).wrap_member(context, ref) @controller.protected() def list_endpoints_for_policy(self, context, policy_id): """List endpoints with the effective association to a policy.""" self.policy_api.get_policy(policy_id) refs = self.endpoint_policy_api.list_endpoints_for_policy(policy_id) return EndpointPolicyV3Controller.wrap_collection(context, refs) keystone-9.0.0/keystone/endpoint_policy/routers.py0000664000567000056710000000742712701407102023643 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from keystone.common import json_home from keystone.common import wsgi from keystone.endpoint_policy import controllers build_resource_relation = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-ENDPOINT-POLICY', extension_version='1.0') class Routers(wsgi.RoutersBase): PATH_PREFIX = '/OS-ENDPOINT-POLICY' def append_v3_routers(self, mapper, routers): endpoint_policy_controller = controllers.EndpointPolicyV3Controller() self._add_resource( mapper, endpoint_policy_controller, path='/endpoints/{endpoint_id}' + self.PATH_PREFIX + '/policy', get_head_action='get_policy_for_endpoint', rel=build_resource_relation(resource_name='endpoint_policy'), path_vars={'endpoint_id': json_home.Parameters.ENDPOINT_ID}) self._add_resource( mapper, endpoint_policy_controller, path='/policies/{policy_id}' + self.PATH_PREFIX + '/endpoints', get_action='list_endpoints_for_policy', rel=build_resource_relation(resource_name='policy_endpoints'), path_vars={'policy_id': json_home.Parameters.POLICY_ID}) self._add_resource( mapper, endpoint_policy_controller, path=('/policies/{policy_id}' + self.PATH_PREFIX + '/endpoints/{endpoint_id}'), get_head_action='check_policy_association_for_endpoint', put_action='create_policy_association_for_endpoint', delete_action='delete_policy_association_for_endpoint', rel=build_resource_relation( resource_name='endpoint_policy_association'), path_vars={ 'policy_id': 
json_home.Parameters.POLICY_ID, 'endpoint_id': json_home.Parameters.ENDPOINT_ID, }) self._add_resource( mapper, endpoint_policy_controller, path=('/policies/{policy_id}' + self.PATH_PREFIX + '/services/{service_id}'), get_head_action='check_policy_association_for_service', put_action='create_policy_association_for_service', delete_action='delete_policy_association_for_service', rel=build_resource_relation( resource_name='service_policy_association'), path_vars={ 'policy_id': json_home.Parameters.POLICY_ID, 'service_id': json_home.Parameters.SERVICE_ID, }) self._add_resource( mapper, endpoint_policy_controller, path=('/policies/{policy_id}' + self.PATH_PREFIX + '/services/{service_id}/regions/{region_id}'), get_head_action='check_policy_association_for_region_and_service', put_action='create_policy_association_for_region_and_service', delete_action='delete_policy_association_for_region_and_service', rel=build_resource_relation( resource_name='region_and_service_policy_association'), path_vars={ 'policy_id': json_home.Parameters.POLICY_ID, 'service_id': json_home.Parameters.SERVICE_ID, 'region_id': json_home.Parameters.REGION_ID, }) keystone-9.0.0/keystone/catalog/0000775000567000056710000000000012701407246020000 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/catalog/backends/0000775000567000056710000000000012701407246021552 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/catalog/backends/__init__.py0000664000567000056710000000000012701407102023640 0ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/catalog/backends/sql.py0000664000567000056710000006037612701407102022726 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2012 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from oslo_config import cfg import sqlalchemy from sqlalchemy.sql import true from keystone import catalog from keystone.catalog import core from keystone.common import driver_hints from keystone.common import sql from keystone import exception from keystone.i18n import _ CONF = cfg.CONF class Region(sql.ModelBase, sql.DictBase): __tablename__ = 'region' attributes = ['id', 'description', 'parent_region_id'] id = sql.Column(sql.String(255), primary_key=True) description = sql.Column(sql.String(255), nullable=False) # NOTE(jaypipes): Right now, using an adjacency list model for # storing the hierarchy of regions is fine, since # the API does not support any kind of querying for # more complex hierarchical queries such as "get me only # the regions that are subchildren of this region", etc. # If, in the future, such queries are needed, then it # would be possible to add in columns to this model for # "left" and "right" and provide support for a nested set # model. 
parent_region_id = sql.Column(sql.String(255), nullable=True) extra = sql.Column(sql.JsonBlob()) endpoints = sqlalchemy.orm.relationship("Endpoint", backref="region") class Service(sql.ModelBase, sql.DictBase): __tablename__ = 'service' attributes = ['id', 'type', 'enabled'] id = sql.Column(sql.String(64), primary_key=True) type = sql.Column(sql.String(255)) enabled = sql.Column(sql.Boolean, nullable=False, default=True, server_default=sqlalchemy.sql.expression.true()) extra = sql.Column(sql.JsonBlob()) endpoints = sqlalchemy.orm.relationship("Endpoint", backref="service") class Endpoint(sql.ModelBase, sql.DictBase): __tablename__ = 'endpoint' attributes = ['id', 'interface', 'region_id', 'service_id', 'url', 'legacy_endpoint_id', 'enabled'] id = sql.Column(sql.String(64), primary_key=True) legacy_endpoint_id = sql.Column(sql.String(64)) interface = sql.Column(sql.String(8), nullable=False) region_id = sql.Column(sql.String(255), sql.ForeignKey('region.id', ondelete='RESTRICT'), nullable=True, default=None) service_id = sql.Column(sql.String(64), sql.ForeignKey('service.id'), nullable=False) url = sql.Column(sql.Text(), nullable=False) enabled = sql.Column(sql.Boolean, nullable=False, default=True, server_default=sqlalchemy.sql.expression.true()) extra = sql.Column(sql.JsonBlob()) class Catalog(catalog.CatalogDriverV8): # Regions def list_regions(self, hints): with sql.session_for_read() as session: regions = session.query(Region) regions = sql.filter_limit_query(Region, regions, hints) return [s.to_dict() for s in list(regions)] def _get_region(self, session, region_id): ref = session.query(Region).get(region_id) if not ref: raise exception.RegionNotFound(region_id=region_id) return ref def _delete_child_regions(self, session, region_id, root_region_id): """Delete all child regions. Recursively delete any region that has the supplied region as its parent. 
""" children = session.query(Region).filter_by(parent_region_id=region_id) for child in children: if child.id == root_region_id: # Hit a circular region hierarchy return self._delete_child_regions(session, child.id, root_region_id) session.delete(child) def _check_parent_region(self, session, region_ref): """Raise a NotFound if the parent region does not exist. If the region_ref has a specified parent_region_id, check that the parent exists, otherwise, raise a NotFound. """ parent_region_id = region_ref.get('parent_region_id') if parent_region_id is not None: # This will raise NotFound if the parent doesn't exist, # which is the behavior we want. self._get_region(session, parent_region_id) def _has_endpoints(self, session, region, root_region): if region.endpoints is not None and len(region.endpoints) > 0: return True q = session.query(Region) q = q.filter_by(parent_region_id=region.id) for child in q.all(): if child.id == root_region.id: # Hit a circular region hierarchy return False if self._has_endpoints(session, child, root_region): return True return False def get_region(self, region_id): with sql.session_for_read() as session: return self._get_region(session, region_id).to_dict() def delete_region(self, region_id): with sql.session_for_write() as session: ref = self._get_region(session, region_id) if self._has_endpoints(session, ref, ref): raise exception.RegionDeletionError(region_id=region_id) self._delete_child_regions(session, region_id, region_id) session.delete(ref) @sql.handle_conflicts(conflict_type='region') def create_region(self, region_ref): with sql.session_for_write() as session: self._check_parent_region(session, region_ref) region = Region.from_dict(region_ref) session.add(region) return region.to_dict() def update_region(self, region_id, region_ref): with sql.session_for_write() as session: self._check_parent_region(session, region_ref) ref = self._get_region(session, region_id) old_dict = ref.to_dict() old_dict.update(region_ref) 
self._ensure_no_circle_in_hierarchical_regions(old_dict) new_region = Region.from_dict(old_dict) for attr in Region.attributes: if attr != 'id': setattr(ref, attr, getattr(new_region, attr)) return ref.to_dict() # Services @driver_hints.truncated def list_services(self, hints): with sql.session_for_read() as session: services = session.query(Service) services = sql.filter_limit_query(Service, services, hints) return [s.to_dict() for s in list(services)] def _get_service(self, session, service_id): ref = session.query(Service).get(service_id) if not ref: raise exception.ServiceNotFound(service_id=service_id) return ref def get_service(self, service_id): with sql.session_for_read() as session: return self._get_service(session, service_id).to_dict() def delete_service(self, service_id): with sql.session_for_write() as session: ref = self._get_service(session, service_id) session.query(Endpoint).filter_by(service_id=service_id).delete() session.delete(ref) def create_service(self, service_id, service_ref): with sql.session_for_write() as session: service = Service.from_dict(service_ref) session.add(service) return service.to_dict() def update_service(self, service_id, service_ref): with sql.session_for_write() as session: ref = self._get_service(session, service_id) old_dict = ref.to_dict() old_dict.update(service_ref) new_service = Service.from_dict(old_dict) for attr in Service.attributes: if attr != 'id': setattr(ref, attr, getattr(new_service, attr)) ref.extra = new_service.extra return ref.to_dict() # Endpoints def create_endpoint(self, endpoint_id, endpoint_ref): new_endpoint = Endpoint.from_dict(endpoint_ref) with sql.session_for_write() as session: session.add(new_endpoint) return new_endpoint.to_dict() def delete_endpoint(self, endpoint_id): with sql.session_for_write() as session: ref = self._get_endpoint(session, endpoint_id) session.delete(ref) def _get_endpoint(self, session, endpoint_id): try: return session.query(Endpoint).filter_by(id=endpoint_id).one() 
except sql.NotFound: raise exception.EndpointNotFound(endpoint_id=endpoint_id) def get_endpoint(self, endpoint_id): with sql.session_for_read() as session: return self._get_endpoint(session, endpoint_id).to_dict() @driver_hints.truncated def list_endpoints(self, hints): with sql.session_for_read() as session: endpoints = session.query(Endpoint) endpoints = sql.filter_limit_query(Endpoint, endpoints, hints) return [e.to_dict() for e in list(endpoints)] def update_endpoint(self, endpoint_id, endpoint_ref): with sql.session_for_write() as session: ref = self._get_endpoint(session, endpoint_id) old_dict = ref.to_dict() old_dict.update(endpoint_ref) new_endpoint = Endpoint.from_dict(old_dict) for attr in Endpoint.attributes: if attr != 'id': setattr(ref, attr, getattr(new_endpoint, attr)) ref.extra = new_endpoint.extra return ref.to_dict() def get_catalog(self, user_id, tenant_id): """Retrieve and format the V2 service catalog. :param user_id: The id of the user who has been authenticated for creating service catalog. :param tenant_id: The id of the project. 'tenant_id' will be None in the case this being called to create a catalog to go in a domain scoped token. In this case, any endpoint that requires a tenant_id as part of their URL will be skipped (as would a whole service if, as a consequence, it has no valid endpoints). :returns: A nested dict representing the service catalog or an empty dict. """ substitutions = dict( itertools.chain(CONF.items(), CONF.eventlet_server.items())) substitutions.update({'user_id': user_id}) silent_keyerror_failures = [] if tenant_id: substitutions.update({ 'tenant_id': tenant_id, 'project_id': tenant_id }) else: silent_keyerror_failures = ['tenant_id', 'project_id', ] with sql.session_for_read() as session: endpoints = (session.query(Endpoint). options(sql.joinedload(Endpoint.service)). 
filter(Endpoint.enabled == true()).all()) catalog = {} for endpoint in endpoints: if not endpoint.service['enabled']: continue try: formatted_url = core.format_url( endpoint['url'], substitutions, silent_keyerror_failures=silent_keyerror_failures) if formatted_url is not None: url = formatted_url else: continue except exception.MalformedEndpoint: continue # this failure is already logged in format_url() region = endpoint['region_id'] service_type = endpoint.service['type'] default_service = { 'id': endpoint['id'], 'name': endpoint.service.extra.get('name', ''), 'publicURL': '' } catalog.setdefault(region, {}) catalog[region].setdefault(service_type, default_service) interface_url = '%sURL' % endpoint['interface'] catalog[region][service_type][interface_url] = url return catalog def get_v3_catalog(self, user_id, tenant_id): """Retrieve and format the current V3 service catalog. :param user_id: The id of the user who has been authenticated for creating service catalog. :param tenant_id: The id of the project. 'tenant_id' will be None in the case this being called to create a catalog to go in a domain scoped token. In this case, any endpoint that requires a tenant_id as part of their URL will be skipped. 
:returns: A list representing the service catalog or an empty list """ d = dict( itertools.chain(CONF.items(), CONF.eventlet_server.items())) d.update({'user_id': user_id}) silent_keyerror_failures = [] if tenant_id: d.update({ 'tenant_id': tenant_id, 'project_id': tenant_id, }) else: silent_keyerror_failures = ['tenant_id', 'project_id', ] with sql.session_for_read() as session: services = (session.query(Service).filter( Service.enabled == true()).options( sql.joinedload(Service.endpoints)).all()) def make_v3_endpoints(endpoints): for endpoint in (ep.to_dict() for ep in endpoints if ep.enabled): del endpoint['service_id'] del endpoint['legacy_endpoint_id'] del endpoint['enabled'] endpoint['region'] = endpoint['region_id'] try: formatted_url = core.format_url( endpoint['url'], d, silent_keyerror_failures=silent_keyerror_failures) if formatted_url: endpoint['url'] = formatted_url else: continue except exception.MalformedEndpoint: # this failure is already logged in format_url() continue yield endpoint # TODO(davechen): If there is service with no endpoints, we should # skip the service instead of keeping it in the catalog, # see bug #1436704. 
def make_v3_service(svc): eps = list(make_v3_endpoints(svc.endpoints)) service = {'endpoints': eps, 'id': svc.id, 'type': svc.type} service['name'] = svc.extra.get('name', '') return service return [make_v3_service(svc) for svc in services] @sql.handle_conflicts(conflict_type='project_endpoint') def add_endpoint_to_project(self, endpoint_id, project_id): with sql.session_for_write() as session: endpoint_filter_ref = ProjectEndpoint(endpoint_id=endpoint_id, project_id=project_id) session.add(endpoint_filter_ref) def _get_project_endpoint_ref(self, session, endpoint_id, project_id): endpoint_filter_ref = session.query(ProjectEndpoint).get( (endpoint_id, project_id)) if endpoint_filter_ref is None: msg = _('Endpoint %(endpoint_id)s not found in project ' '%(project_id)s') % {'endpoint_id': endpoint_id, 'project_id': project_id} raise exception.NotFound(msg) return endpoint_filter_ref def check_endpoint_in_project(self, endpoint_id, project_id): with sql.session_for_read() as session: self._get_project_endpoint_ref(session, endpoint_id, project_id) def remove_endpoint_from_project(self, endpoint_id, project_id): with sql.session_for_write() as session: endpoint_filter_ref = self._get_project_endpoint_ref( session, endpoint_id, project_id) session.delete(endpoint_filter_ref) def list_endpoints_for_project(self, project_id): with sql.session_for_read() as session: query = session.query(ProjectEndpoint) query = query.filter_by(project_id=project_id) endpoint_filter_refs = query.all() return [ref.to_dict() for ref in endpoint_filter_refs] def list_projects_for_endpoint(self, endpoint_id): with sql.session_for_read() as session: query = session.query(ProjectEndpoint) query = query.filter_by(endpoint_id=endpoint_id) endpoint_filter_refs = query.all() return [ref.to_dict() for ref in endpoint_filter_refs] def delete_association_by_endpoint(self, endpoint_id): with sql.session_for_write() as session: query = session.query(ProjectEndpoint) query = 
query.filter_by(endpoint_id=endpoint_id) query.delete(synchronize_session=False) def delete_association_by_project(self, project_id): with sql.session_for_write() as session: query = session.query(ProjectEndpoint) query = query.filter_by(project_id=project_id) query.delete(synchronize_session=False) def create_endpoint_group(self, endpoint_group_id, endpoint_group): with sql.session_for_write() as session: endpoint_group_ref = EndpointGroup.from_dict(endpoint_group) session.add(endpoint_group_ref) return endpoint_group_ref.to_dict() def _get_endpoint_group(self, session, endpoint_group_id): endpoint_group_ref = session.query(EndpointGroup).get( endpoint_group_id) if endpoint_group_ref is None: raise exception.EndpointGroupNotFound( endpoint_group_id=endpoint_group_id) return endpoint_group_ref def get_endpoint_group(self, endpoint_group_id): with sql.session_for_read() as session: endpoint_group_ref = self._get_endpoint_group(session, endpoint_group_id) return endpoint_group_ref.to_dict() def update_endpoint_group(self, endpoint_group_id, endpoint_group): with sql.session_for_write() as session: endpoint_group_ref = self._get_endpoint_group(session, endpoint_group_id) old_endpoint_group = endpoint_group_ref.to_dict() old_endpoint_group.update(endpoint_group) new_endpoint_group = EndpointGroup.from_dict(old_endpoint_group) for attr in EndpointGroup.mutable_attributes: setattr(endpoint_group_ref, attr, getattr(new_endpoint_group, attr)) return endpoint_group_ref.to_dict() def delete_endpoint_group(self, endpoint_group_id): with sql.session_for_write() as session: endpoint_group_ref = self._get_endpoint_group(session, endpoint_group_id) self._delete_endpoint_group_association_by_endpoint_group( session, endpoint_group_id) session.delete(endpoint_group_ref) def get_endpoint_group_in_project(self, endpoint_group_id, project_id): with sql.session_for_read() as session: ref = self._get_endpoint_group_in_project(session, endpoint_group_id, project_id) return ref.to_dict() 
@sql.handle_conflicts(conflict_type='project_endpoint_group') def add_endpoint_group_to_project(self, endpoint_group_id, project_id): with sql.session_for_write() as session: # Create a new Project Endpoint group entity endpoint_group_project_ref = ProjectEndpointGroupMembership( endpoint_group_id=endpoint_group_id, project_id=project_id) session.add(endpoint_group_project_ref) def _get_endpoint_group_in_project(self, session, endpoint_group_id, project_id): endpoint_group_project_ref = session.query( ProjectEndpointGroupMembership).get((endpoint_group_id, project_id)) if endpoint_group_project_ref is None: msg = _('Endpoint Group Project Association not found') raise exception.NotFound(msg) else: return endpoint_group_project_ref def list_endpoint_groups(self): with sql.session_for_read() as session: query = session.query(EndpointGroup) endpoint_group_refs = query.all() return [e.to_dict() for e in endpoint_group_refs] def list_endpoint_groups_for_project(self, project_id): with sql.session_for_read() as session: query = session.query(ProjectEndpointGroupMembership) query = query.filter_by(project_id=project_id) endpoint_group_refs = query.all() return [ref.to_dict() for ref in endpoint_group_refs] def remove_endpoint_group_from_project(self, endpoint_group_id, project_id): with sql.session_for_write() as session: endpoint_group_project_ref = self._get_endpoint_group_in_project( session, endpoint_group_id, project_id) session.delete(endpoint_group_project_ref) def list_projects_associated_with_endpoint_group(self, endpoint_group_id): with sql.session_for_read() as session: query = session.query(ProjectEndpointGroupMembership) query = query.filter_by(endpoint_group_id=endpoint_group_id) endpoint_group_refs = query.all() return [ref.to_dict() for ref in endpoint_group_refs] def _delete_endpoint_group_association_by_endpoint_group( self, session, endpoint_group_id): query = session.query(ProjectEndpointGroupMembership) query = 
query.filter_by(endpoint_group_id=endpoint_group_id) query.delete() def delete_endpoint_group_association_by_project(self, project_id): with sql.session_for_write() as session: query = session.query(ProjectEndpointGroupMembership) query = query.filter_by(project_id=project_id) query.delete() class ProjectEndpoint(sql.ModelBase, sql.ModelDictMixin): """project-endpoint relationship table.""" __tablename__ = 'project_endpoint' attributes = ['endpoint_id', 'project_id'] endpoint_id = sql.Column(sql.String(64), primary_key=True, nullable=False) project_id = sql.Column(sql.String(64), primary_key=True, nullable=False) class EndpointGroup(sql.ModelBase, sql.ModelDictMixin): """Endpoint Groups table.""" __tablename__ = 'endpoint_group' attributes = ['id', 'name', 'description', 'filters'] mutable_attributes = frozenset(['name', 'description', 'filters']) id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(255), nullable=False) description = sql.Column(sql.Text, nullable=True) filters = sql.Column(sql.JsonBlob(), nullable=False) class ProjectEndpointGroupMembership(sql.ModelBase, sql.ModelDictMixin): """Project to Endpoint group relationship table.""" __tablename__ = 'project_endpoint_group' attributes = ['endpoint_group_id', 'project_id'] endpoint_group_id = sql.Column(sql.String(64), sql.ForeignKey('endpoint_group.id'), nullable=False) project_id = sql.Column(sql.String(64), nullable=False) __table_args__ = (sql.PrimaryKeyConstraint('endpoint_group_id', 'project_id'),) keystone-9.0.0/keystone/catalog/backends/templated.py0000664000567000056710000002425612701407102024103 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import os.path from oslo_config import cfg from oslo_log import log import six from keystone.catalog import core from keystone import exception from keystone.i18n import _LC LOG = log.getLogger(__name__) CONF = cfg.CONF def parse_templates(template_lines): o = {} for line in template_lines: if ' = ' not in line: continue k, v = line.strip().split(' = ') if not k.startswith('catalog.'): continue parts = k.split('.') region = parts[1] # NOTE(termie): object-store insists on having a dash service = parts[2].replace('_', '-') key = parts[3] region_ref = o.get(region, {}) service_ref = region_ref.get(service, {}) service_ref[key] = v region_ref[service] = service_ref o[region] = region_ref return o class Catalog(core.Driver): """A backend that generates endpoints for the Catalog based on templates. It is usually configured via config entries that look like: catalog.$REGION.$SERVICE.$key = $value and is stored in a similar looking hierarchy. Where a value can contain values to be interpolated by standard python string interpolation that look like (the % is replaced by a $ due to paste attempting to interpolate on its own: http://localhost:$(public_port)s/ When expanding the template it will pass in a dict made up of the conf instance plus a few additional key-values, notably tenant_id and user_id. It does not care what the keys and values are but it is worth noting that keystone_compat will expect certain keys to be there so that it can munge them into the output format keystone expects. 
These keys are: name - the name of the service, most likely repeated for all services of the same type, across regions. adminURL - the url of the admin endpoint publicURL - the url of the public endpoint internalURL - the url of the internal endpoint """ def __init__(self, templates=None): super(Catalog, self).__init__() if templates: self.templates = templates else: template_file = CONF.catalog.template_file if not os.path.exists(template_file): template_file = CONF.find_file(template_file) self._load_templates(template_file) def _load_templates(self, template_file): try: with open(template_file) as f: self.templates = parse_templates(f) except IOError: LOG.critical(_LC('Unable to open template file %s'), template_file) raise # region crud def create_region(self, region_ref): raise exception.NotImplemented() def list_regions(self, hints): return [{'id': region_id, 'description': '', 'parent_region_id': ''} for region_id in self.templates] def get_region(self, region_id): if region_id in self.templates: return {'id': region_id, 'description': '', 'parent_region_id': ''} raise exception.RegionNotFound(region_id=region_id) def update_region(self, region_id, region_ref): raise exception.NotImplemented() def delete_region(self, region_id): raise exception.NotImplemented() # service crud def create_service(self, service_id, service_ref): raise exception.NotImplemented() def _list_services(self, hints): for region_ref in six.itervalues(self.templates): for service_type, service_ref in six.iteritems(region_ref): yield { 'id': service_type, 'enabled': True, 'name': service_ref.get('name', ''), 'description': service_ref.get('description', ''), 'type': service_type, } def list_services(self, hints): return list(self._list_services(hints=None)) def get_service(self, service_id): for service in self._list_services(hints=None): if service['id'] == service_id: return service raise exception.ServiceNotFound(service_id=service_id) def update_service(self, service_id, 
service_ref): raise exception.NotImplemented() def delete_service(self, service_id): raise exception.NotImplemented() # endpoint crud def create_endpoint(self, endpoint_id, endpoint_ref): raise exception.NotImplemented() def _list_endpoints(self): for region_id, region_ref in six.iteritems(self.templates): for service_type, service_ref in six.iteritems(region_ref): for key in service_ref: if key.endswith('URL'): interface = key[:-3] endpoint_id = ('%s-%s-%s' % (region_id, service_type, interface)) yield { 'id': endpoint_id, 'service_id': service_type, 'interface': interface, 'url': service_ref[key], 'legacy_endpoint_id': None, 'region_id': region_id, 'enabled': True, } def list_endpoints(self, hints): return list(self._list_endpoints()) def get_endpoint(self, endpoint_id): for endpoint in self._list_endpoints(): if endpoint['id'] == endpoint_id: return endpoint raise exception.EndpointNotFound(endpoint_id=endpoint_id) def update_endpoint(self, endpoint_id, endpoint_ref): raise exception.NotImplemented() def delete_endpoint(self, endpoint_id): raise exception.NotImplemented() def get_catalog(self, user_id, tenant_id): """Retrieve and format the V2 service catalog. :param user_id: The id of the user who has been authenticated for creating service catalog. :param tenant_id: The id of the project. 'tenant_id' will be None in the case this being called to create a catalog to go in a domain scoped token. In this case, any endpoint that requires a tenant_id as part of their URL will be skipped. :returns: A nested dict representing the service catalog or an empty dict. 
""" substitutions = dict( itertools.chain(CONF.items(), CONF.eventlet_server.items())) substitutions.update({'user_id': user_id}) silent_keyerror_failures = [] if tenant_id: substitutions.update({ 'tenant_id': tenant_id, 'project_id': tenant_id, }) else: silent_keyerror_failures = ['tenant_id', 'project_id', ] catalog = {} # TODO(davechen): If there is service with no endpoints, we should # skip the service instead of keeping it in the catalog. # see bug #1436704. for region, region_ref in self.templates.items(): catalog[region] = {} for service, service_ref in region_ref.items(): service_data = {} try: for k, v in service_ref.items(): formatted_value = core.format_url( v, substitutions, silent_keyerror_failures=silent_keyerror_failures) if formatted_value: service_data[k] = formatted_value except exception.MalformedEndpoint: continue # this failure is already logged in format_url() catalog[region][service] = service_data return catalog def add_endpoint_to_project(self, endpoint_id, project_id): raise exception.NotImplemented() def remove_endpoint_from_project(self, endpoint_id, project_id): raise exception.NotImplemented() def check_endpoint_in_project(self, endpoint_id, project_id): raise exception.NotImplemented() def list_endpoints_for_project(self, project_id): raise exception.NotImplemented() def list_projects_for_endpoint(self, endpoint_id): raise exception.NotImplemented() def delete_association_by_endpoint(self, endpoint_id): raise exception.NotImplemented() def delete_association_by_project(self, project_id): raise exception.NotImplemented() def create_endpoint_group(self, endpoint_group): raise exception.NotImplemented() def get_endpoint_group(self, endpoint_group_id): raise exception.NotImplemented() def update_endpoint_group(self, endpoint_group_id, endpoint_group): raise exception.NotImplemented() def delete_endpoint_group(self, endpoint_group_id): raise exception.NotImplemented() def add_endpoint_group_to_project(self, endpoint_group_id, project_id): 
raise exception.NotImplemented() def get_endpoint_group_in_project(self, endpoint_group_id, project_id): raise exception.NotImplemented() def list_endpoint_groups(self): raise exception.NotImplemented() def list_endpoint_groups_for_project(self, project_id): raise exception.NotImplemented() def list_projects_associated_with_endpoint_group(self, endpoint_group_id): raise exception.NotImplemented() def remove_endpoint_group_from_project(self, endpoint_group_id, project_id): raise exception.NotImplemented() def delete_endpoint_group_association_by_project(self, project_id): raise exception.NotImplemented() keystone-9.0.0/keystone/catalog/schema.py0000664000567000056710000000546012701407102021606 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import validation from keystone.common.validation import parameter_types _region_properties = { 'description': { 'type': ['string', 'null'], }, # NOTE(lbragstad): Regions use ID differently. The user can specify the ID # or it will be generated automatically. 'id': { 'type': 'string' }, 'parent_region_id': { 'type': ['string', 'null'] } } region_create = { 'type': 'object', 'properties': _region_properties, 'additionalProperties': True # NOTE(lbragstad): No parameters are required for creating regions. 
} region_update = { 'type': 'object', 'properties': _region_properties, 'minProperties': 1, 'additionalProperties': True } _service_properties = { 'enabled': parameter_types.boolean, 'name': parameter_types.name, 'type': { 'type': 'string', 'minLength': 1, 'maxLength': 255 } } service_create = { 'type': 'object', 'properties': _service_properties, 'required': ['type'], 'additionalProperties': True, } service_update = { 'type': 'object', 'properties': _service_properties, 'minProperties': 1, 'additionalProperties': True } _endpoint_properties = { 'enabled': parameter_types.boolean, 'interface': { 'type': 'string', 'enum': ['admin', 'internal', 'public'] }, 'region_id': { 'type': 'string' }, 'region': { 'type': 'string' }, 'service_id': { 'type': 'string' }, 'url': parameter_types.url } endpoint_create = { 'type': 'object', 'properties': _endpoint_properties, 'required': ['interface', 'service_id', 'url'], 'additionalProperties': True } endpoint_update = { 'type': 'object', 'properties': _endpoint_properties, 'minProperties': 1, 'additionalProperties': True } _endpoint_group_properties = { 'description': validation.nullable(parameter_types.description), 'filters': { 'type': 'object' }, 'name': parameter_types.name } endpoint_group_create = { 'type': 'object', 'properties': _endpoint_group_properties, 'required': ['name', 'filters'] } endpoint_group_update = { 'type': 'object', 'properties': _endpoint_group_properties, 'minProperties': 1 } keystone-9.0.0/keystone/catalog/__init__.py0000664000567000056710000000124712701407102022104 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.catalog import controllers # noqa from keystone.catalog.core import * # noqa keystone-9.0.0/keystone/catalog/core.py0000664000567000056710000007774312701407102021313 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2012 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Main entry point into the Catalog service.""" import abc import itertools from oslo_cache import core as oslo_cache from oslo_config import cfg from oslo_log import log import six from keystone.common import cache from keystone.common import dependency from keystone.common import driver_hints from keystone.common import manager from keystone.common import utils from keystone import exception from keystone.i18n import _ from keystone.i18n import _LE from keystone import notifications CONF = cfg.CONF LOG = log.getLogger(__name__) WHITELISTED_PROPERTIES = [ 'tenant_id', 'project_id', 'user_id', 'public_bind_host', 'admin_bind_host', 'compute_host', 'admin_port', 'public_port', 'public_endpoint', 'admin_endpoint', ] # This is a general cache region for catalog administration (CRUD operations). MEMOIZE = cache.get_memoization_decorator(group='catalog') # This builds a discrete cache region dedicated to complete service catalogs # computed for a given user + project pair. Any write operation to create, # modify or delete elements of the service catalog should invalidate this # entire cache region. COMPUTED_CATALOG_REGION = oslo_cache.create_region() MEMOIZE_COMPUTED_CATALOG = cache.get_memoization_decorator( group='catalog', region=COMPUTED_CATALOG_REGION) def format_url(url, substitutions, silent_keyerror_failures=None): """Formats a user-defined URL with the given substitutions. 
:param string url: the URL to be formatted :param dict substitutions: the dictionary used for substitution :param list silent_keyerror_failures: keys for which we should be silent if there is a KeyError exception on substitution attempt :returns: a formatted URL """ substitutions = utils.WhiteListedItemFilter( WHITELISTED_PROPERTIES, substitutions) allow_keyerror = silent_keyerror_failures or [] try: result = url.replace('$(', '%(') % substitutions except AttributeError: LOG.error(_LE('Malformed endpoint - %(url)r is not a string'), {"url": url}) raise exception.MalformedEndpoint(endpoint=url) except KeyError as e: if not e.args or e.args[0] not in allow_keyerror: LOG.error(_LE("Malformed endpoint %(url)s - unknown key " "%(keyerror)s"), {"url": url, "keyerror": e}) raise exception.MalformedEndpoint(endpoint=url) else: result = None except TypeError as e: LOG.error(_LE("Malformed endpoint '%(url)s'. The following type error " "occurred during string substitution: %(typeerror)s"), {"url": url, "typeerror": e}) raise exception.MalformedEndpoint(endpoint=url) except ValueError as e: LOG.error(_LE("Malformed endpoint %s - incomplete format " "(are you missing a type notifier ?)"), url) raise exception.MalformedEndpoint(endpoint=url) return result def check_endpoint_url(url): """Check substitution of url. The invalid urls are as follows: urls with substitutions that is not in the whitelist Check the substitutions in the URL to make sure they are valid and on the whitelist. 
:param str url: the URL to validate :rtype: None :raises keystone.exception.URLValidationError: if the URL is invalid """ # check whether the property in the path is exactly the same # with that in the whitelist below substitutions = dict(zip(WHITELISTED_PROPERTIES, itertools.repeat(''))) try: url.replace('$(', '%(') % substitutions except (KeyError, TypeError, ValueError): raise exception.URLValidationError(url) @dependency.provider('catalog_api') @dependency.requires('resource_api') class Manager(manager.Manager): """Default pivot point for the Catalog backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.catalog' _ENDPOINT = 'endpoint' _SERVICE = 'service' _REGION = 'region' def __init__(self): super(Manager, self).__init__(CONF.catalog.driver) def create_region(self, region_ref, initiator=None): # Check duplicate ID try: self.get_region(region_ref['id']) except exception.RegionNotFound: # nosec # A region with the same id doesn't exist already, good. pass else: msg = _('Duplicate ID, %s.') % region_ref['id'] raise exception.Conflict(type='region', details=msg) # NOTE(lbragstad,dstanek): The description column of the region # database cannot be null. So if the user doesn't pass in a # description or passes in a null description then set it to an # empty string. 
if region_ref.get('description') is None: region_ref['description'] = '' try: ret = self.driver.create_region(region_ref) except exception.NotFound: parent_region_id = region_ref.get('parent_region_id') raise exception.RegionNotFound(region_id=parent_region_id) notifications.Audit.created(self._REGION, ret['id'], initiator) COMPUTED_CATALOG_REGION.invalidate() return ret @MEMOIZE def get_region(self, region_id): try: return self.driver.get_region(region_id) except exception.NotFound: raise exception.RegionNotFound(region_id=region_id) def update_region(self, region_id, region_ref, initiator=None): # NOTE(lbragstad,dstanek): The description column of the region # database cannot be null. So if the user passes in a null # description set it to an empty string. if 'description' in region_ref and region_ref['description'] is None: region_ref['description'] = '' ref = self.driver.update_region(region_id, region_ref) notifications.Audit.updated(self._REGION, region_id, initiator) self.get_region.invalidate(self, region_id) COMPUTED_CATALOG_REGION.invalidate() return ref def delete_region(self, region_id, initiator=None): try: ret = self.driver.delete_region(region_id) notifications.Audit.deleted(self._REGION, region_id, initiator) self.get_region.invalidate(self, region_id) COMPUTED_CATALOG_REGION.invalidate() return ret except exception.NotFound: raise exception.RegionNotFound(region_id=region_id) @manager.response_truncated def list_regions(self, hints=None): return self.driver.list_regions(hints or driver_hints.Hints()) def create_service(self, service_id, service_ref, initiator=None): service_ref.setdefault('enabled', True) service_ref.setdefault('name', '') ref = self.driver.create_service(service_id, service_ref) notifications.Audit.created(self._SERVICE, service_id, initiator) COMPUTED_CATALOG_REGION.invalidate() return ref @MEMOIZE def get_service(self, service_id): try: return self.driver.get_service(service_id) except exception.NotFound: raise 
exception.ServiceNotFound(service_id=service_id) def update_service(self, service_id, service_ref, initiator=None): ref = self.driver.update_service(service_id, service_ref) notifications.Audit.updated(self._SERVICE, service_id, initiator) self.get_service.invalidate(self, service_id) COMPUTED_CATALOG_REGION.invalidate() return ref def delete_service(self, service_id, initiator=None): try: endpoints = self.list_endpoints() ret = self.driver.delete_service(service_id) notifications.Audit.deleted(self._SERVICE, service_id, initiator) self.get_service.invalidate(self, service_id) for endpoint in endpoints: if endpoint['service_id'] == service_id: self.get_endpoint.invalidate(self, endpoint['id']) COMPUTED_CATALOG_REGION.invalidate() return ret except exception.NotFound: raise exception.ServiceNotFound(service_id=service_id) @manager.response_truncated def list_services(self, hints=None): return self.driver.list_services(hints or driver_hints.Hints()) def _assert_region_exists(self, region_id): try: if region_id is not None: self.get_region(region_id) except exception.RegionNotFound: raise exception.ValidationError(attribute='endpoint region_id', target='region table') def _assert_service_exists(self, service_id): try: if service_id is not None: self.get_service(service_id) except exception.ServiceNotFound: raise exception.ValidationError(attribute='endpoint service_id', target='service table') def create_endpoint(self, endpoint_id, endpoint_ref, initiator=None): self._assert_region_exists(endpoint_ref.get('region_id')) self._assert_service_exists(endpoint_ref['service_id']) ref = self.driver.create_endpoint(endpoint_id, endpoint_ref) notifications.Audit.created(self._ENDPOINT, endpoint_id, initiator) COMPUTED_CATALOG_REGION.invalidate() return ref def update_endpoint(self, endpoint_id, endpoint_ref, initiator=None): self._assert_region_exists(endpoint_ref.get('region_id')) self._assert_service_exists(endpoint_ref.get('service_id')) ref = 
self.driver.update_endpoint(endpoint_id, endpoint_ref) notifications.Audit.updated(self._ENDPOINT, endpoint_id, initiator) self.get_endpoint.invalidate(self, endpoint_id) COMPUTED_CATALOG_REGION.invalidate() return ref def delete_endpoint(self, endpoint_id, initiator=None): try: ret = self.driver.delete_endpoint(endpoint_id) notifications.Audit.deleted(self._ENDPOINT, endpoint_id, initiator) self.get_endpoint.invalidate(self, endpoint_id) COMPUTED_CATALOG_REGION.invalidate() return ret except exception.NotFound: raise exception.EndpointNotFound(endpoint_id=endpoint_id) @MEMOIZE def get_endpoint(self, endpoint_id): try: return self.driver.get_endpoint(endpoint_id) except exception.NotFound: raise exception.EndpointNotFound(endpoint_id=endpoint_id) @manager.response_truncated def list_endpoints(self, hints=None): return self.driver.list_endpoints(hints or driver_hints.Hints()) @MEMOIZE_COMPUTED_CATALOG def get_catalog(self, user_id, tenant_id): try: return self.driver.get_catalog(user_id, tenant_id) except exception.NotFound: raise exception.NotFound('Catalog not found for user and tenant') @MEMOIZE_COMPUTED_CATALOG def get_v3_catalog(self, user_id, tenant_id): return self.driver.get_v3_catalog(user_id, tenant_id) def add_endpoint_to_project(self, endpoint_id, project_id): self.driver.add_endpoint_to_project(endpoint_id, project_id) COMPUTED_CATALOG_REGION.invalidate() def remove_endpoint_from_project(self, endpoint_id, project_id): self.driver.remove_endpoint_from_project(endpoint_id, project_id) COMPUTED_CATALOG_REGION.invalidate() def add_endpoint_group_to_project(self, endpoint_group_id, project_id): self.driver.add_endpoint_group_to_project( endpoint_group_id, project_id) COMPUTED_CATALOG_REGION.invalidate() def remove_endpoint_group_from_project(self, endpoint_group_id, project_id): self.driver.remove_endpoint_group_from_project( endpoint_group_id, project_id) COMPUTED_CATALOG_REGION.invalidate() def get_endpoint_groups_for_project(self, project_id): # recover 
the project endpoint group memberships and for each # membership recover the endpoint group self.resource_api.get_project(project_id) try: refs = self.list_endpoint_groups_for_project(project_id) endpoint_groups = [self.get_endpoint_group( ref['endpoint_group_id']) for ref in refs] return endpoint_groups except exception.EndpointGroupNotFound: return [] def get_endpoints_filtered_by_endpoint_group(self, endpoint_group_id): endpoints = self.list_endpoints() filters = self.get_endpoint_group(endpoint_group_id)['filters'] filtered_endpoints = [] for endpoint in endpoints: is_candidate = True for key, value in filters.items(): if endpoint[key] != value: is_candidate = False break if is_candidate: filtered_endpoints.append(endpoint) return filtered_endpoints def list_endpoints_for_project(self, project_id): """List all endpoints associated with a project. :param project_id: project identifier to check :type project_id: string :returns: a list of endpoint ids or an empty list. """ refs = self.driver.list_endpoints_for_project(project_id) filtered_endpoints = {} for ref in refs: try: endpoint = self.get_endpoint(ref['endpoint_id']) filtered_endpoints.update({ref['endpoint_id']: endpoint}) except exception.EndpointNotFound: # remove bad reference from association self.remove_endpoint_from_project(ref['endpoint_id'], project_id) # need to recover endpoint_groups associated with project # then for each endpoint group return the endpoints. 
endpoint_groups = self.get_endpoint_groups_for_project(project_id) for endpoint_group in endpoint_groups: endpoint_refs = self.get_endpoints_filtered_by_endpoint_group( endpoint_group['id']) # now check if any endpoints for current endpoint group are not # contained in the list of filtered endpoints for endpoint_ref in endpoint_refs: if endpoint_ref['id'] not in filtered_endpoints: filtered_endpoints[endpoint_ref['id']] = endpoint_ref return filtered_endpoints @six.add_metaclass(abc.ABCMeta) class CatalogDriverV8(object): """Interface description for the Catalog driver.""" def _get_list_limit(self): return CONF.catalog.list_limit or CONF.list_limit def _ensure_no_circle_in_hierarchical_regions(self, region_ref): if region_ref.get('parent_region_id') is None: return root_region_id = region_ref['id'] parent_region_id = region_ref['parent_region_id'] while parent_region_id: # NOTE(wanghong): check before getting parent region can ensure no # self circle if parent_region_id == root_region_id: raise exception.CircularRegionHierarchyError( parent_region_id=parent_region_id) parent_region = self.get_region(parent_region_id) parent_region_id = parent_region.get('parent_region_id') @abc.abstractmethod def create_region(self, region_ref): """Creates a new region. :raises keystone.exception.Conflict: If the region already exists. :raises keystone.exception.RegionNotFound: If the parent region is invalid. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_regions(self, hints): """List all regions. :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: list of region_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_region(self, region_id): """Get region by id. :returns: region_ref dict :raises keystone.exception.RegionNotFound: If the region doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_region(self, region_id, region_ref): """Update region by id. :returns: region_ref dict :raises keystone.exception.RegionNotFound: If the region doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_region(self, region_id): """Deletes an existing region. :raises keystone.exception.RegionNotFound: If the region doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_service(self, service_id, service_ref): """Creates a new service. :raises keystone.exception.Conflict: If a duplicate service exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_services(self, hints): """List all services. :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: list of service_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_service(self, service_id): """Get service by id. :returns: service_ref dict :raises keystone.exception.ServiceNotFound: If the service doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_service(self, service_id, service_ref): """Update service by id. :returns: service_ref dict :raises keystone.exception.ServiceNotFound: If the service doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_service(self, service_id): """Deletes an existing service. :raises keystone.exception.ServiceNotFound: If the service doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_endpoint(self, endpoint_id, endpoint_ref): """Creates a new endpoint for a service. :raises keystone.exception.Conflict: If a duplicate endpoint exists. 
:raises keystone.exception.ServiceNotFound: If the service doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_endpoint(self, endpoint_id): """Get endpoint by id. :returns: endpoint_ref dict :raises keystone.exception.EndpointNotFound: If the endpoint doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_endpoints(self, hints): """List all endpoints. :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: list of endpoint_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_endpoint(self, endpoint_id, endpoint_ref): """Get endpoint by id. :returns: endpoint_ref dict :raises keystone.exception.EndpointNotFound: If the endpoint doesn't exist. :raises keystone.exception.ServiceNotFound: If the service doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_endpoint(self, endpoint_id): """Deletes an endpoint for a service. :raises keystone.exception.EndpointNotFound: If the endpoint doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_catalog(self, user_id, tenant_id): """Retrieve and format the current service catalog. Example:: { 'RegionOne': {'compute': { 'adminURL': u'http://host:8774/v1.1/tenantid', 'internalURL': u'http://host:8774/v1.1/tenant_id', 'name': 'Compute Service', 'publicURL': u'http://host:8774/v1.1/tenantid'}, 'ec2': { 'adminURL': 'http://host:8773/services/Admin', 'internalURL': 'http://host:8773/services/Cloud', 'name': 'EC2 Service', 'publicURL': 'http://host:8773/services/Cloud'}} :returns: A nested dict representing the service catalog or an empty dict. :raises keystone.exception.NotFound: If the endpoint doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover def get_v3_catalog(self, user_id, tenant_id): """Retrieve and format the current V3 service catalog. The default implementation builds the V3 catalog from the V2 catalog. Example:: [ { "endpoints": [ { "interface": "public", "id": "--endpoint-id--", "region": "RegionOne", "url": "http://external:8776/v1/--project-id--" }, { "interface": "internal", "id": "--endpoint-id--", "region": "RegionOne", "url": "http://internal:8776/v1/--project-id--" }], "id": "--service-id--", "type": "volume" }] :returns: A list representing the service catalog or an empty list :raises keystone.exception.NotFound: If the endpoint doesn't exist. """ v2_catalog = self.get_catalog(user_id, tenant_id) v3_catalog = [] for region_name, region in v2_catalog.items(): for service_type, service in region.items(): service_v3 = { 'type': service_type, 'endpoints': [] } for attr, value in service.items(): # Attributes that end in URL are interfaces. In the V2 # catalog, these are internalURL, publicURL, and adminURL. # For example, .publicURL= in the V2 # catalog becomes the V3 interface for the service: # { 'interface': 'public', 'url': '', 'region': # 'region: '' } if attr.endswith('URL'): v3_interface = attr[:-len('URL')] service_v3['endpoints'].append({ 'interface': v3_interface, 'region': region_name, 'url': value, }) continue # Other attributes are copied to the service. service_v3[attr] = value v3_catalog.append(service_v3) return v3_catalog @abc.abstractmethod def add_endpoint_to_project(self, endpoint_id, project_id): """Create an endpoint to project association. :param endpoint_id: identity of endpoint to associate :type endpoint_id: string :param project_id: identity of the project to be associated with :type project_id: string :raises: keystone.exception.Conflict: If the endpoint was already added to project. :returns: None. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def remove_endpoint_from_project(self, endpoint_id, project_id): """Removes an endpoint to project association. :param endpoint_id: identity of endpoint to remove :type endpoint_id: string :param project_id: identity of the project associated with :type project_id: string :raises keystone.exception.NotFound: If the endpoint was not found in the project. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def check_endpoint_in_project(self, endpoint_id, project_id): """Checks if an endpoint is associated with a project. :param endpoint_id: identity of endpoint to check :type endpoint_id: string :param project_id: identity of the project associated with :type project_id: string :raises keystone.exception.NotFound: If the endpoint was not found in the project. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_endpoints_for_project(self, project_id): """List all endpoints associated with a project. :param project_id: identity of the project to check :type project_id: string :returns: a list of identity endpoint ids or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_projects_for_endpoint(self, endpoint_id): """List all projects associated with an endpoint. :param endpoint_id: identity of endpoint to check :type endpoint_id: string :returns: a list of projects or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_association_by_endpoint(self, endpoint_id): """Removes all the endpoints to project association with endpoint. :param endpoint_id: identity of endpoint to check :type endpoint_id: string :returns: None """ raise exception.NotImplemented() @abc.abstractmethod def delete_association_by_project(self, project_id): """Removes all the endpoints to project association with project. 
:param project_id: identity of the project to check :type project_id: string :returns: None """ raise exception.NotImplemented() @abc.abstractmethod def create_endpoint_group(self, endpoint_group): """Create an endpoint group. :param endpoint_group: endpoint group to create :type endpoint_group: dictionary :raises: keystone.exception.Conflict: If a duplicate endpoint group already exists. :returns: an endpoint group representation. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_endpoint_group(self, endpoint_group_id): """Get an endpoint group. :param endpoint_group_id: identity of endpoint group to retrieve :type endpoint_group_id: string :raises keystone.exception.NotFound: If the endpoint group was not found. :returns: an endpoint group representation. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_endpoint_group(self, endpoint_group_id, endpoint_group): """Update an endpoint group. :param endpoint_group_id: identity of endpoint group to retrieve :type endpoint_group_id: string :param endpoint_group: A full or partial endpoint_group :type endpoint_group: dictionary :raises keystone.exception.NotFound: If the endpoint group was not found. :returns: an endpoint group representation. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_endpoint_group(self, endpoint_group_id): """Delete an endpoint group. :param endpoint_group_id: identity of endpoint group to delete :type endpoint_group_id: string :raises keystone.exception.NotFound: If the endpoint group was not found. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def add_endpoint_group_to_project(self, endpoint_group_id, project_id): """Adds an endpoint group to project association. 
:param endpoint_group_id: identity of endpoint to associate :type endpoint_group_id: string :param project_id: identity of project to associate :type project_id: string :raises keystone.exception.Conflict: If the endpoint group was already added to the project. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_endpoint_group_in_project(self, endpoint_group_id, project_id): """Get endpoint group to project association. :param endpoint_group_id: identity of endpoint group to retrieve :type endpoint_group_id: string :param project_id: identity of project to associate :type project_id: string :raises keystone.exception.NotFound: If the endpoint group to the project association was not found. :returns: a project endpoint group representation. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_endpoint_groups(self): """List all endpoint groups. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_endpoint_groups_for_project(self, project_id): """List all endpoint group to project associations for a project. :param project_id: identity of project to associate :type project_id: string :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_projects_associated_with_endpoint_group(self, endpoint_group_id): """List all projects associated with endpoint group. :param endpoint_group_id: identity of endpoint to associate :type endpoint_group_id: string :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def remove_endpoint_group_from_project(self, endpoint_group_id, project_id): """Remove an endpoint to project association. 
:param endpoint_group_id: identity of endpoint to associate :type endpoint_group_id: string :param project_id: identity of project to associate :type project_id: string :raises keystone.exception.NotFound: If endpoint group project association was not found. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_endpoint_group_association_by_project(self, project_id): """Remove endpoint group to project associations. :param project_id: identity of the project to check :type project_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover Driver = manager.create_legacy_driver(CatalogDriverV8) keystone-9.0.0/keystone/catalog/controllers.py0000664000567000056710000006353412701407102022722 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2012 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import six from keystone.catalog import core from keystone.catalog import schema from keystone.common import controller from keystone.common import dependency from keystone.common import validation from keystone.common import wsgi from keystone import exception from keystone.i18n import _ from keystone import notifications from keystone import resource INTERFACES = ['public', 'internal', 'admin'] @dependency.requires('catalog_api') class Service(controller.V2Controller): @controller.v2_deprecated def get_services(self, context): self.assert_admin(context) service_list = self.catalog_api.list_services() return {'OS-KSADM:services': service_list} @controller.v2_deprecated def get_service(self, context, service_id): self.assert_admin(context) service_ref = self.catalog_api.get_service(service_id) return {'OS-KSADM:service': service_ref} @controller.v2_deprecated def delete_service(self, context, service_id): self.assert_admin(context) initiator = notifications._get_request_audit_info(context) self.catalog_api.delete_service(service_id, initiator) @controller.v2_deprecated def create_service(self, context, OS_KSADM_service): self.assert_admin(context) service_id = uuid.uuid4().hex service_ref = OS_KSADM_service.copy() service_ref['id'] = service_id initiator = notifications._get_request_audit_info(context) new_service_ref = self.catalog_api.create_service( service_id, service_ref, initiator) return {'OS-KSADM:service': new_service_ref} @dependency.requires('catalog_api') class Endpoint(controller.V2Controller): @controller.v2_deprecated def get_endpoints(self, context): """Merge matching v3 endpoint refs into legacy refs.""" self.assert_admin(context) legacy_endpoints = {} v3_endpoints = {} for endpoint in self.catalog_api.list_endpoints(): if not endpoint.get('legacy_endpoint_id'): # pure v3 endpoint # tell endpoints apart by the combination of # service_id and region_id. 
# NOTE(muyu): in theory, it's possible that there are more than # one endpoint of one service, one region and one interface, # but in practice, it makes no sense because only one will be # used. key = (endpoint['service_id'], endpoint['region_id']) v3_endpoints.setdefault(key, []).append(endpoint) else: # legacy endpoint if endpoint['legacy_endpoint_id'] not in legacy_endpoints: legacy_ep = endpoint.copy() legacy_ep['id'] = legacy_ep.pop('legacy_endpoint_id') legacy_ep.pop('interface') legacy_ep.pop('url') legacy_ep['region'] = legacy_ep.pop('region_id') legacy_endpoints[endpoint['legacy_endpoint_id']] = ( legacy_ep) else: legacy_ep = ( legacy_endpoints[endpoint['legacy_endpoint_id']]) # add the legacy endpoint with an interface url legacy_ep['%surl' % endpoint['interface']] = endpoint['url'] # convert collected v3 endpoints into v2 endpoints for endpoints in v3_endpoints.values(): legacy_ep = {} # For v3 endpoints in the same group, contents of extra attributes # can be different, which may cause confusion if a random one is # used. So only necessary attributes are used here. # It's different for legacy v2 endpoints, which are created # with the same "extra" value when being migrated. for key in ('service_id', 'enabled'): legacy_ep[key] = endpoints[0][key] legacy_ep['region'] = endpoints[0]['region_id'] for endpoint in endpoints: # Public URL is required for v2 endpoints, so the generated v2 # endpoint uses public endpoint's id as its id, which can also # be an indicator whether a public v3 endpoint is present. # It's safe to do so is also because that there is no v2 API to # get an endpoint by endpoint ID. 
if endpoint['interface'] == 'public': legacy_ep['id'] = endpoint['id'] legacy_ep['%surl' % endpoint['interface']] = endpoint['url'] # this means there is no public URL of this group of v3 endpoints if 'id' not in legacy_ep: continue legacy_endpoints[legacy_ep['id']] = legacy_ep return {'endpoints': list(legacy_endpoints.values())} @controller.v2_deprecated def create_endpoint(self, context, endpoint): """Create three v3 endpoint refs based on a legacy ref.""" self.assert_admin(context) # according to the v2 spec publicurl is mandatory self._require_attribute(endpoint, 'publicurl') # service_id is necessary self._require_attribute(endpoint, 'service_id') # we should check publicurl, adminurl, internalurl # if invalid, we should raise an exception to reject # the request for interface in INTERFACES: interface_url = endpoint.get(interface + 'url') if interface_url: core.check_endpoint_url(interface_url) initiator = notifications._get_request_audit_info(context) if endpoint.get('region') is not None: try: self.catalog_api.get_region(endpoint['region']) except exception.RegionNotFound: region = dict(id=endpoint['region']) self.catalog_api.create_region(region, initiator) legacy_endpoint_ref = endpoint.copy() urls = {} for i in INTERFACES: # remove all urls so they aren't persisted them more than once url = '%surl' % i if endpoint.get(url): # valid urls need to be persisted urls[i] = endpoint.pop(url) elif url in endpoint: # null or empty urls can be discarded endpoint.pop(url) legacy_endpoint_ref.pop(url) legacy_endpoint_id = uuid.uuid4().hex for interface, url in urls.items(): endpoint_ref = endpoint.copy() endpoint_ref['id'] = uuid.uuid4().hex endpoint_ref['legacy_endpoint_id'] = legacy_endpoint_id endpoint_ref['interface'] = interface endpoint_ref['url'] = url endpoint_ref['region_id'] = endpoint_ref.pop('region') self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref, initiator) legacy_endpoint_ref['id'] = legacy_endpoint_id return {'endpoint': 
legacy_endpoint_ref} @controller.v2_deprecated def delete_endpoint(self, context, endpoint_id): """Delete up to three v3 endpoint refs based on a legacy ref ID.""" self.assert_admin(context) initiator = notifications._get_request_audit_info(context) deleted_at_least_one = False for endpoint in self.catalog_api.list_endpoints(): if endpoint['legacy_endpoint_id'] == endpoint_id: self.catalog_api.delete_endpoint(endpoint['id'], initiator) deleted_at_least_one = True if not deleted_at_least_one: raise exception.EndpointNotFound(endpoint_id=endpoint_id) @dependency.requires('catalog_api') class RegionV3(controller.V3Controller): collection_name = 'regions' member_name = 'region' def create_region_with_id(self, context, region_id, region): """Create a region with a user-specified ID. This method is unprotected because it depends on ``self.create_region`` to enforce policy. """ if 'id' in region and region_id != region['id']: raise exception.ValidationError( _('Conflicting region IDs specified: ' '"%(url_id)s" != "%(ref_id)s"') % { 'url_id': region_id, 'ref_id': region['id']}) region['id'] = region_id return self.create_region(context, region) @controller.protected() @validation.validated(schema.region_create, 'region') def create_region(self, context, region): ref = self._normalize_dict(region) if not ref.get('id'): ref = self._assign_unique_id(ref) initiator = notifications._get_request_audit_info(context) ref = self.catalog_api.create_region(ref, initiator) return wsgi.render_response( RegionV3.wrap_member(context, ref), status=(201, 'Created')) @controller.filterprotected('parent_region_id') def list_regions(self, context, filters): hints = RegionV3.build_driver_hints(context, filters) refs = self.catalog_api.list_regions(hints) return RegionV3.wrap_collection(context, refs, hints=hints) @controller.protected() def get_region(self, context, region_id): ref = self.catalog_api.get_region(region_id) return RegionV3.wrap_member(context, ref) @controller.protected() 
@validation.validated(schema.region_update, 'region') def update_region(self, context, region_id, region): self._require_matching_id(region_id, region) initiator = notifications._get_request_audit_info(context) ref = self.catalog_api.update_region(region_id, region, initiator) return RegionV3.wrap_member(context, ref) @controller.protected() def delete_region(self, context, region_id): initiator = notifications._get_request_audit_info(context) return self.catalog_api.delete_region(region_id, initiator) @dependency.requires('catalog_api') class ServiceV3(controller.V3Controller): collection_name = 'services' member_name = 'service' def __init__(self): super(ServiceV3, self).__init__() self.get_member_from_driver = self.catalog_api.get_service @controller.protected() @validation.validated(schema.service_create, 'service') def create_service(self, context, service): ref = self._assign_unique_id(self._normalize_dict(service)) initiator = notifications._get_request_audit_info(context) ref = self.catalog_api.create_service(ref['id'], ref, initiator) return ServiceV3.wrap_member(context, ref) @controller.filterprotected('type', 'name') def list_services(self, context, filters): hints = ServiceV3.build_driver_hints(context, filters) refs = self.catalog_api.list_services(hints=hints) return ServiceV3.wrap_collection(context, refs, hints=hints) @controller.protected() def get_service(self, context, service_id): ref = self.catalog_api.get_service(service_id) return ServiceV3.wrap_member(context, ref) @controller.protected() @validation.validated(schema.service_update, 'service') def update_service(self, context, service_id, service): self._require_matching_id(service_id, service) initiator = notifications._get_request_audit_info(context) ref = self.catalog_api.update_service(service_id, service, initiator) return ServiceV3.wrap_member(context, ref) @controller.protected() def delete_service(self, context, service_id): initiator = notifications._get_request_audit_info(context) 
return self.catalog_api.delete_service(service_id, initiator) @dependency.requires('catalog_api') class EndpointV3(controller.V3Controller): collection_name = 'endpoints' member_name = 'endpoint' def __init__(self): super(EndpointV3, self).__init__() self.get_member_from_driver = self.catalog_api.get_endpoint @classmethod def filter_endpoint(cls, ref): if 'legacy_endpoint_id' in ref: ref.pop('legacy_endpoint_id') ref['region'] = ref['region_id'] return ref @classmethod def wrap_member(cls, context, ref): ref = cls.filter_endpoint(ref) return super(EndpointV3, cls).wrap_member(context, ref) def _validate_endpoint_region(self, endpoint, context=None): """Ensure the region for the endpoint exists. If 'region_id' is used to specify the region, then we will let the manager/driver take care of this. If, however, 'region' is used, then for backward compatibility, we will auto-create the region. """ if (endpoint.get('region_id') is None and endpoint.get('region') is not None): # To maintain backward compatibility with clients that are # using the v3 API in the same way as they used the v2 API, # create the endpoint region, if that region does not exist # in keystone. 
endpoint['region_id'] = endpoint.pop('region') try: self.catalog_api.get_region(endpoint['region_id']) except exception.RegionNotFound: region = dict(id=endpoint['region_id']) initiator = notifications._get_request_audit_info(context) self.catalog_api.create_region(region, initiator) return endpoint @controller.protected() @validation.validated(schema.endpoint_create, 'endpoint') def create_endpoint(self, context, endpoint): core.check_endpoint_url(endpoint['url']) ref = self._assign_unique_id(self._normalize_dict(endpoint)) ref = self._validate_endpoint_region(ref, context) initiator = notifications._get_request_audit_info(context) ref = self.catalog_api.create_endpoint(ref['id'], ref, initiator) return EndpointV3.wrap_member(context, ref) @controller.filterprotected('interface', 'service_id', 'region_id') def list_endpoints(self, context, filters): hints = EndpointV3.build_driver_hints(context, filters) refs = self.catalog_api.list_endpoints(hints=hints) return EndpointV3.wrap_collection(context, refs, hints=hints) @controller.protected() def get_endpoint(self, context, endpoint_id): ref = self.catalog_api.get_endpoint(endpoint_id) return EndpointV3.wrap_member(context, ref) @controller.protected() @validation.validated(schema.endpoint_update, 'endpoint') def update_endpoint(self, context, endpoint_id, endpoint): self._require_matching_id(endpoint_id, endpoint) endpoint = self._validate_endpoint_region(endpoint.copy(), context) initiator = notifications._get_request_audit_info(context) ref = self.catalog_api.update_endpoint(endpoint_id, endpoint, initiator) return EndpointV3.wrap_member(context, ref) @controller.protected() def delete_endpoint(self, context, endpoint_id): initiator = notifications._get_request_audit_info(context) return self.catalog_api.delete_endpoint(endpoint_id, initiator) @dependency.requires('catalog_api', 'resource_api') class EndpointFilterV3Controller(controller.V3Controller): def __init__(self): super(EndpointFilterV3Controller, 
self).__init__() notifications.register_event_callback( notifications.ACTIONS.deleted, 'project', self._on_project_or_endpoint_delete) notifications.register_event_callback( notifications.ACTIONS.deleted, 'endpoint', self._on_project_or_endpoint_delete) def _on_project_or_endpoint_delete(self, service, resource_type, operation, payload): project_or_endpoint_id = payload['resource_info'] if resource_type == 'project': self.catalog_api.delete_association_by_project( project_or_endpoint_id) else: self.catalog_api.delete_association_by_endpoint( project_or_endpoint_id) @controller.protected() def add_endpoint_to_project(self, context, project_id, endpoint_id): """Establishes an association between an endpoint and a project.""" # NOTE(gyee): we just need to make sure endpoint and project exist # first. We don't really care whether if project is disabled. # The relationship can still be established even with a disabled # project as there are no security implications. self.catalog_api.get_endpoint(endpoint_id) self.resource_api.get_project(project_id) self.catalog_api.add_endpoint_to_project(endpoint_id, project_id) @controller.protected() def check_endpoint_in_project(self, context, project_id, endpoint_id): """Verifies endpoint is currently associated with given project.""" self.catalog_api.get_endpoint(endpoint_id) self.resource_api.get_project(project_id) self.catalog_api.check_endpoint_in_project(endpoint_id, project_id) @controller.protected() def list_endpoints_for_project(self, context, project_id): """List all endpoints currently associated with a given project.""" self.resource_api.get_project(project_id) filtered_endpoints = self.catalog_api.list_endpoints_for_project( project_id) return EndpointV3.wrap_collection( context, [v for v in six.itervalues(filtered_endpoints)]) @controller.protected() def remove_endpoint_from_project(self, context, project_id, endpoint_id): """Remove the endpoint from the association with given project.""" 
self.catalog_api.remove_endpoint_from_project(endpoint_id, project_id) @controller.protected() def list_projects_for_endpoint(self, context, endpoint_id): """Return a list of projects associated with the endpoint.""" self.catalog_api.get_endpoint(endpoint_id) refs = self.catalog_api.list_projects_for_endpoint(endpoint_id) projects = [self.resource_api.get_project( ref['project_id']) for ref in refs] return resource.controllers.ProjectV3.wrap_collection(context, projects) @dependency.requires('catalog_api', 'resource_api') class EndpointGroupV3Controller(controller.V3Controller): collection_name = 'endpoint_groups' member_name = 'endpoint_group' VALID_FILTER_KEYS = ['service_id', 'region_id', 'interface'] def __init__(self): super(EndpointGroupV3Controller, self).__init__() @classmethod def base_url(cls, context, path=None): """Construct a path and pass it to V3Controller.base_url method.""" path = '/OS-EP-FILTER/' + cls.collection_name return super(EndpointGroupV3Controller, cls).base_url(context, path=path) @controller.protected() @validation.validated(schema.endpoint_group_create, 'endpoint_group') def create_endpoint_group(self, context, endpoint_group): """Creates an Endpoint Group with the associated filters.""" ref = self._assign_unique_id(self._normalize_dict(endpoint_group)) self._require_attribute(ref, 'filters') self._require_valid_filter(ref) ref = self.catalog_api.create_endpoint_group(ref['id'], ref) return EndpointGroupV3Controller.wrap_member(context, ref) def _require_valid_filter(self, endpoint_group): filters = endpoint_group.get('filters') for key in six.iterkeys(filters): if key not in self.VALID_FILTER_KEYS: raise exception.ValidationError( attribute=self._valid_filter_keys(), target='endpoint_group') def _valid_filter_keys(self): return ' or '.join(self.VALID_FILTER_KEYS) @controller.protected() def get_endpoint_group(self, context, endpoint_group_id): """Retrieve the endpoint group associated with the id if exists.""" ref = 
self.catalog_api.get_endpoint_group(endpoint_group_id) return EndpointGroupV3Controller.wrap_member( context, ref) @controller.protected() @validation.validated(schema.endpoint_group_update, 'endpoint_group') def update_endpoint_group(self, context, endpoint_group_id, endpoint_group): """Update fixed values and/or extend the filters.""" if 'filters' in endpoint_group: self._require_valid_filter(endpoint_group) ref = self.catalog_api.update_endpoint_group(endpoint_group_id, endpoint_group) return EndpointGroupV3Controller.wrap_member( context, ref) @controller.protected() def delete_endpoint_group(self, context, endpoint_group_id): """Delete endpoint_group.""" self.catalog_api.delete_endpoint_group(endpoint_group_id) @controller.protected() def list_endpoint_groups(self, context): """List all endpoint groups.""" refs = self.catalog_api.list_endpoint_groups() return EndpointGroupV3Controller.wrap_collection( context, refs) @controller.protected() def list_endpoint_groups_for_project(self, context, project_id): """List all endpoint groups associated with a given project.""" return EndpointGroupV3Controller.wrap_collection( context, self.catalog_api.get_endpoint_groups_for_project(project_id)) @controller.protected() def list_projects_associated_with_endpoint_group(self, context, endpoint_group_id): """List all projects associated with endpoint group.""" endpoint_group_refs = (self.catalog_api. list_projects_associated_with_endpoint_group( endpoint_group_id)) projects = [] for endpoint_group_ref in endpoint_group_refs: project = self.resource_api.get_project( endpoint_group_ref['project_id']) if project: projects.append(project) return resource.controllers.ProjectV3.wrap_collection(context, projects) @controller.protected() def list_endpoints_associated_with_endpoint_group(self, context, endpoint_group_id): """List all the endpoints filtered by a specific endpoint group.""" filtered_endpoints = (self.catalog_api. 
get_endpoints_filtered_by_endpoint_group( endpoint_group_id)) return EndpointV3.wrap_collection(context, filtered_endpoints) @dependency.requires('catalog_api', 'resource_api') class ProjectEndpointGroupV3Controller(controller.V3Controller): collection_name = 'project_endpoint_groups' member_name = 'project_endpoint_group' def __init__(self): super(ProjectEndpointGroupV3Controller, self).__init__() notifications.register_event_callback( notifications.ACTIONS.deleted, 'project', self._on_project_delete) def _on_project_delete(self, service, resource_type, operation, payload): project_id = payload['resource_info'] (self.catalog_api. delete_endpoint_group_association_by_project( project_id)) @controller.protected() def get_endpoint_group_in_project(self, context, endpoint_group_id, project_id): """Retrieve the endpoint group associated with the id if exists.""" self.resource_api.get_project(project_id) self.catalog_api.get_endpoint_group(endpoint_group_id) ref = self.catalog_api.get_endpoint_group_in_project( endpoint_group_id, project_id) return ProjectEndpointGroupV3Controller.wrap_member( context, ref) @controller.protected() def add_endpoint_group_to_project(self, context, endpoint_group_id, project_id): """Creates an association between an endpoint group and project.""" self.resource_api.get_project(project_id) self.catalog_api.get_endpoint_group(endpoint_group_id) self.catalog_api.add_endpoint_group_to_project( endpoint_group_id, project_id) @controller.protected() def remove_endpoint_group_from_project(self, context, endpoint_group_id, project_id): """Remove the endpoint group from associated project.""" self.resource_api.get_project(project_id) self.catalog_api.get_endpoint_group(endpoint_group_id) self.catalog_api.remove_endpoint_group_from_project( endpoint_group_id, project_id) @classmethod def _add_self_referential_link(cls, context, ref): url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' '/projects/%(project_id)s' % { 'endpoint_group_id': 
ref['endpoint_group_id'], 'project_id': ref['project_id']}) ref.setdefault('links', {}) ref['links']['self'] = url keystone-9.0.0/keystone/catalog/routers.py0000664000567000056710000001776512701407102022064 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from keystone.catalog import controllers from keystone.common import json_home from keystone.common import router from keystone.common import wsgi build_resource_relation = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-EP-FILTER', extension_version='1.0') build_parameter_relation = functools.partial( json_home.build_v3_extension_parameter_relation, extension_name='OS-EP-FILTER', extension_version='1.0') ENDPOINT_GROUP_PARAMETER_RELATION = build_parameter_relation( parameter_name='endpoint_group_id') class Routers(wsgi.RoutersBase): """API for the keystone catalog. 
The API Endpoint Filter looks like:: PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} GET /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects GET /OS-EP-FILTER/projects/{project_id}/endpoints GET /OS-EP-FILTER/projects/{project_id}/endpoint_groups GET /OS-EP-FILTER/endpoint_groups POST /OS-EP-FILTER/endpoint_groups GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id} HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id} PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group_id} DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group_id} GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/endpoints PUT /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/ {project_id} GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/ {project_id} HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/ {project_id} DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/ {project_id} """ PATH_PREFIX = '/OS-EP-FILTER' PATH_PROJECT_ENDPOINT = '/projects/{project_id}/endpoints/{endpoint_id}' PATH_ENDPOINT_GROUPS = '/endpoint_groups/{endpoint_group_id}' PATH_ENDPOINT_GROUP_PROJECTS = PATH_ENDPOINT_GROUPS + ( '/projects/{project_id}') def append_v3_routers(self, mapper, routers): regions_controller = controllers.RegionV3() endpoint_filter_controller = controllers.EndpointFilterV3Controller() endpoint_group_controller = controllers.EndpointGroupV3Controller() project_endpoint_group_controller = ( controllers.ProjectEndpointGroupV3Controller()) routers.append(router.Router(regions_controller, 'regions', 'region', resource_descriptions=self.v3_resources)) # Need to add an additional route to support PUT /regions/{region_id} mapper.connect( '/regions/{region_id}', controller=regions_controller, 
action='create_region_with_id', conditions=dict(method=['PUT'])) routers.append(router.Router(controllers.ServiceV3(), 'services', 'service', resource_descriptions=self.v3_resources)) routers.append(router.Router(controllers.EndpointV3(), 'endpoints', 'endpoint', resource_descriptions=self.v3_resources)) self._add_resource( mapper, endpoint_filter_controller, path=self.PATH_PREFIX + '/endpoints/{endpoint_id}/projects', get_action='list_projects_for_endpoint', rel=build_resource_relation(resource_name='endpoint_projects'), path_vars={ 'endpoint_id': json_home.Parameters.ENDPOINT_ID, }) self._add_resource( mapper, endpoint_filter_controller, path=self.PATH_PREFIX + self.PATH_PROJECT_ENDPOINT, get_head_action='check_endpoint_in_project', put_action='add_endpoint_to_project', delete_action='remove_endpoint_from_project', rel=build_resource_relation(resource_name='project_endpoint'), path_vars={ 'endpoint_id': json_home.Parameters.ENDPOINT_ID, 'project_id': json_home.Parameters.PROJECT_ID, }) self._add_resource( mapper, endpoint_filter_controller, path=self.PATH_PREFIX + '/projects/{project_id}/endpoints', get_action='list_endpoints_for_project', rel=build_resource_relation(resource_name='project_endpoints'), path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, }) self._add_resource( mapper, endpoint_group_controller, path=self.PATH_PREFIX + '/projects/{project_id}/endpoint_groups', get_action='list_endpoint_groups_for_project', rel=build_resource_relation( resource_name='project_endpoint_groups'), path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, }) self._add_resource( mapper, endpoint_group_controller, path=self.PATH_PREFIX + '/endpoint_groups', get_action='list_endpoint_groups', post_action='create_endpoint_group', rel=build_resource_relation(resource_name='endpoint_groups')) self._add_resource( mapper, endpoint_group_controller, path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS, get_head_action='get_endpoint_group', 
patch_action='update_endpoint_group', delete_action='delete_endpoint_group', rel=build_resource_relation(resource_name='endpoint_group'), path_vars={ 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION }) self._add_resource( mapper, project_endpoint_group_controller, path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUP_PROJECTS, get_head_action='get_endpoint_group_in_project', put_action='add_endpoint_group_to_project', delete_action='remove_endpoint_group_from_project', rel=build_resource_relation( resource_name='endpoint_group_to_project_association'), path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION }) self._add_resource( mapper, endpoint_group_controller, path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS + ( '/projects'), get_action='list_projects_associated_with_endpoint_group', rel=build_resource_relation( resource_name='projects_associated_with_endpoint_group'), path_vars={ 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION }) self._add_resource( mapper, endpoint_group_controller, path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS + ( '/endpoints'), get_action='list_endpoints_associated_with_endpoint_group', rel=build_resource_relation( resource_name='endpoints_in_endpoint_group'), path_vars={ 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION }) keystone-9.0.0/keystone/auth/0000775000567000056710000000000012701407246017327 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/auth/__init__.py0000664000567000056710000000124112701407102021425 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.auth import controllers # noqa from keystone.auth.core import * # noqa keystone-9.0.0/keystone/auth/plugins/0000775000567000056710000000000012701407246021010 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone/auth/plugins/saml2.py0000664000567000056710000000213512701407102022370 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from keystone.auth.plugins import mapped @versionutils.deprecated( versionutils.deprecated.MITAKA, what='keystone.auth.plugins.saml2.Saml2', in_favor_of='keystone.auth.plugins.mapped.Mapped', remove_in=+2) class Saml2(mapped.Mapped): """Provide an entry point to authenticate with SAML2. 
This plugin subclasses ``mapped.Mapped``, and may be specified in keystone.conf:: [auth] methods = external,password,token,saml2 saml2 = keystone.auth.plugins.mapped.Mapped """ pass keystone-9.0.0/keystone/auth/plugins/external.py0000664000567000056710000000673112701407102023202 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Keystone External Authentication Plugins""" import abc from oslo_config import cfg import six from keystone import auth from keystone.common import dependency from keystone import exception from keystone.i18n import _ CONF = cfg.CONF @six.add_metaclass(abc.ABCMeta) class Base(auth.AuthMethodHandler): def authenticate(self, context, auth_info, auth_context): """Use REMOTE_USER to look up the user in the identity backend. auth_context is an in-out variable that will be updated with the user_id from the actual user from the REMOTE_USER env variable. 
""" try: REMOTE_USER = context['environment']['REMOTE_USER'] except KeyError: msg = _('No authenticated user') raise exception.Unauthorized(msg) try: user_ref = self._authenticate(REMOTE_USER, context) auth_context['user_id'] = user_ref['id'] if ('kerberos' in CONF.token.bind and (context['environment'].get('AUTH_TYPE', '').lower() == 'negotiate')): auth_context['bind']['kerberos'] = user_ref['name'] except Exception: msg = _('Unable to lookup user %s') % (REMOTE_USER) raise exception.Unauthorized(msg) @abc.abstractmethod def _authenticate(self, remote_user, context): """Look up the user in the identity backend. Return user_ref """ pass @dependency.requires('identity_api') class DefaultDomain(Base): def _authenticate(self, remote_user, context): """Use remote_user to look up the user in the identity backend.""" domain_id = CONF.identity.default_domain_id user_ref = self.identity_api.get_user_by_name(remote_user, domain_id) return user_ref @dependency.requires('identity_api', 'resource_api') class Domain(Base): def _authenticate(self, remote_user, context): """Use remote_user to look up the user in the identity backend. The domain will be extracted from the REMOTE_DOMAIN environment variable if present. If not, the default domain will be used. 
""" username = remote_user try: domain_name = context['environment']['REMOTE_DOMAIN'] except KeyError: domain_id = CONF.identity.default_domain_id else: domain_ref = self.resource_api.get_domain_by_name(domain_name) domain_id = domain_ref['id'] user_ref = self.identity_api.get_user_by_name(username, domain_id) return user_ref class KerberosDomain(Domain): """Allows `kerberos` as a method.""" def _authenticate(self, remote_user, context): auth_type = context['environment'].get('AUTH_TYPE') if auth_type != 'Negotiate': raise exception.Unauthorized(_("auth_type is not Negotiate")) return super(KerberosDomain, self)._authenticate(remote_user, context) keystone-9.0.0/keystone/auth/plugins/token.py0000664000567000056710000000746312701407102022503 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log import six from keystone import auth from keystone.auth.plugins import mapped from keystone.common import dependency from keystone.common import wsgi from keystone import exception from keystone.i18n import _ from keystone.models import token_model LOG = log.getLogger(__name__) CONF = cfg.CONF @dependency.requires('federation_api', 'identity_api', 'token_provider_api') class Token(auth.AuthMethodHandler): def _get_token_ref(self, auth_payload): token_id = auth_payload['id'] response = self.token_provider_api.validate_token(token_id) return token_model.KeystoneToken(token_id=token_id, token_data=response) def authenticate(self, context, auth_payload, user_context): if 'id' not in auth_payload: raise exception.ValidationError(attribute='id', target='token') token_ref = self._get_token_ref(auth_payload) if token_ref.is_federated_user and self.federation_api: mapped.handle_scoped_token( context, auth_payload, user_context, token_ref, self.federation_api, self.identity_api, self.token_provider_api) else: token_authenticate(context, auth_payload, user_context, token_ref) def token_authenticate(context, auth_payload, user_context, token_ref): try: # Do not allow tokens used for delegation to # create another token, or perform any changes of # state in Keystone. To do so is to invite elevation of # privilege attacks if token_ref.oauth_scoped or token_ref.trust_scoped: raise exception.Forbidden() if not CONF.token.allow_rescope_scoped_token: # Do not allow conversion from scoped tokens. if token_ref.project_scoped or token_ref.domain_scoped: raise exception.Forbidden(action=_("rescope a scoped token")) wsgi.validate_token_bind(context, token_ref) # New tokens maintain the audit_id of the original token in the # chain (if possible) as the second element in the audit data # structure. 
Look for the last element in the audit data structure # which will be either the audit_id of the token (in the case of # a token that has not been rescoped) or the audit_chain id (in # the case of a token that has been rescoped). try: token_audit_id = token_ref.get('audit_ids', [])[-1] except IndexError: # NOTE(morganfainberg): In the case this is a token that was # issued prior to audit id existing, the chain is not tracked. token_audit_id = None user_context.setdefault('expires_at', token_ref.expires) user_context['audit_id'] = token_audit_id user_context.setdefault('user_id', token_ref.user_id) # TODO(morganfainberg: determine if token 'extras' can be removed # from the user_context user_context['extras'].update(token_ref.get('extras', {})) user_context['method_names'].extend(token_ref.methods) except AssertionError as e: LOG.error(six.text_type(e)) raise exception.Unauthorized(e) keystone-9.0.0/keystone/auth/plugins/__init__.py0000664000567000056710000000115312701407102023110 0ustar jenkinsjenkins00000000000000# Copyright 2015 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.auth.plugins.core import * # noqa keystone-9.0.0/keystone/auth/plugins/core.py0000664000567000056710000001617412701407102022312 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_log import log import six from keystone.common import dependency from keystone import exception CONF = cfg.CONF LOG = log.getLogger(__name__) def construct_method_map_from_config(): """Determine authentication method types for deployment. :returns: a dictionary containing the methods and their indexes """ method_map = dict() method_index = 1 for method in CONF.auth.methods: method_map[method_index] = method method_index = method_index * 2 return method_map def convert_method_list_to_integer(methods): """Convert the method type(s) to an integer. :param methods: a list of method names :returns: an integer representing the methods """ method_map = construct_method_map_from_config() method_ints = [] for method in methods: for k, v in method_map.items(): if v == method: method_ints.append(k) return sum(method_ints) def convert_integer_to_method_list(method_int): """Convert an integer to a list of methods. :param method_int: an integer representing methods :returns: a corresponding list of methods """ # If the method_int is 0 then no methods were used so return an empty # method list if method_int == 0: return [] method_map = construct_method_map_from_config() method_ints = [] for k, v in method_map.items(): method_ints.append(k) method_ints.sort(reverse=True) confirmed_methods = [] for m_int in method_ints: # (lbragstad): By dividing the method_int by each key in the # method_map, we know if the division results in an integer of 1, that # key was used in the construction of the total sum of the method_int. 
# In that case, we should confirm the key value and store it so we can # look it up later. Then we should take the remainder of what is # confirmed and the method_int and continue the process. In the end, we # should have a list of integers that correspond to indexes in our # method_map and we can reinflate the methods that the original # method_int represents. if (method_int / m_int) == 1: confirmed_methods.append(m_int) method_int = method_int - m_int methods = [] for method in confirmed_methods: methods.append(method_map[method]) return methods @dependency.requires('identity_api', 'resource_api') class BaseUserInfo(object): @classmethod def create(cls, auth_payload, method_name): user_auth_info = cls() user_auth_info._validate_and_normalize_auth_data(auth_payload) user_auth_info.METHOD_NAME = method_name return user_auth_info def __init__(self): self.user_id = None self.user_ref = None self.METHOD_NAME = None def _assert_domain_is_enabled(self, domain_ref): try: self.resource_api.assert_domain_enabled( domain_id=domain_ref['id'], domain=domain_ref) except AssertionError as e: LOG.warning(six.text_type(e)) six.reraise(exception.Unauthorized, exception.Unauthorized(e), sys.exc_info()[2]) def _assert_user_is_enabled(self, user_ref): try: self.identity_api.assert_user_enabled( user_id=user_ref['id'], user=user_ref) except AssertionError as e: LOG.warning(six.text_type(e)) six.reraise(exception.Unauthorized, exception.Unauthorized(e), sys.exc_info()[2]) def _lookup_domain(self, domain_info): domain_id = domain_info.get('id') domain_name = domain_info.get('name') domain_ref = None if not domain_id and not domain_name: raise exception.ValidationError(attribute='id or name', target='domain') try: if domain_name: domain_ref = self.resource_api.get_domain_by_name( domain_name) else: domain_ref = self.resource_api.get_domain(domain_id) except exception.DomainNotFound as e: LOG.exception(six.text_type(e)) raise exception.Unauthorized(e) 
self._assert_domain_is_enabled(domain_ref) return domain_ref def _validate_and_normalize_auth_data(self, auth_payload): if 'user' not in auth_payload: raise exception.ValidationError(attribute='user', target=self.METHOD_NAME) user_info = auth_payload['user'] user_id = user_info.get('id') user_name = user_info.get('name') user_ref = None if not user_id and not user_name: raise exception.ValidationError(attribute='id or name', target='user') try: if user_name: if 'domain' not in user_info: raise exception.ValidationError(attribute='domain', target='user') domain_ref = self._lookup_domain(user_info['domain']) user_ref = self.identity_api.get_user_by_name( user_name, domain_ref['id']) else: user_ref = self.identity_api.get_user(user_id) domain_ref = self.resource_api.get_domain( user_ref['domain_id']) self._assert_domain_is_enabled(domain_ref) except exception.UserNotFound as e: LOG.exception(six.text_type(e)) raise exception.Unauthorized(e) self._assert_user_is_enabled(user_ref) self.user_ref = user_ref self.user_id = user_ref['id'] self.domain_id = domain_ref['id'] class UserAuthInfo(BaseUserInfo): def __init__(self): super(UserAuthInfo, self).__init__() self.password = None def _validate_and_normalize_auth_data(self, auth_payload): super(UserAuthInfo, self)._validate_and_normalize_auth_data( auth_payload) user_info = auth_payload['user'] self.password = user_info.get('password') class TOTPUserInfo(BaseUserInfo): def __init__(self): super(TOTPUserInfo, self).__init__() self.passcode = None def _validate_and_normalize_auth_data(self, auth_payload): super(TOTPUserInfo, self)._validate_and_normalize_auth_data( auth_payload) user_info = auth_payload['user'] self.passcode = user_info.get('passcode') keystone-9.0.0/keystone/auth/plugins/password.py0000664000567000056710000000274112701407102023217 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import auth from keystone.auth import plugins as auth_plugins from keystone.common import dependency from keystone import exception from keystone.i18n import _ METHOD_NAME = 'password' @dependency.requires('identity_api') class Password(auth.AuthMethodHandler): def authenticate(self, context, auth_payload, auth_context): """Try to authenticate against the identity backend.""" user_info = auth_plugins.UserAuthInfo.create(auth_payload, METHOD_NAME) try: self.identity_api.authenticate( context, user_id=user_info.user_id, password=user_info.password) except AssertionError: # authentication failed because of invalid username or password msg = _('Invalid username or password') raise exception.Unauthorized(msg) auth_context['user_id'] = user_info.user_id keystone-9.0.0/keystone/auth/plugins/oauth1.py0000664000567000056710000000474012701407102022557 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import timeutils from keystone import auth from keystone.common import controller from keystone.common import dependency from keystone import exception from keystone.i18n import _ from keystone.oauth1 import core as oauth from keystone.oauth1 import validator @dependency.requires('oauth_api') class OAuth(auth.AuthMethodHandler): def authenticate(self, context, auth_info, auth_context): """Turn a signed request with an access key into a keystone token.""" headers = context['headers'] oauth_headers = oauth.get_oauth_headers(headers) access_token_id = oauth_headers.get('oauth_token') if not access_token_id: raise exception.ValidationError( attribute='oauth_token', target='request') acc_token = self.oauth_api.get_access_token(access_token_id) expires_at = acc_token['expires_at'] if expires_at: now = timeutils.utcnow() expires = timeutils.normalize_time( timeutils.parse_isotime(expires_at)) if now > expires: raise exception.Unauthorized(_('Access token is expired')) url = controller.V3Controller.base_url(context, context['path']) access_verifier = oauth.ResourceEndpoint( request_validator=validator.OAuthValidator(), token_generator=oauth.token_generator) result, request = access_verifier.validate_protected_resource_request( url, http_method='POST', body=context['query_string'], headers=headers, realms=None ) if not result: msg = _('Could not validate the access token') raise exception.Unauthorized(msg) auth_context['user_id'] = acc_token['authorizing_user_id'] auth_context['access_token_id'] = access_token_id auth_context['project_id'] = acc_token['project_id'] keystone-9.0.0/keystone/auth/plugins/totp.py0000664000567000056710000000707112701407102022344 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Time-based One-time Password Algorithm (TOTP) auth plugin TOTP is an algorithm that computes a one-time password from a shared secret key and the current time. TOTP is an implementation of a hash-based message authentication code (HMAC). It combines a secret key with the current timestamp using a cryptographic hash function to generate a one-time password. The timestamp typically increases in 30-second intervals, so passwords generated close together in time from the same secret key will be equal. """ import base64 import time from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.twofactor import totp as crypto_totp from oslo_log import log import six from keystone import auth from keystone.auth import plugins from keystone.common import dependency from keystone import exception from keystone.i18n import _ METHOD_NAME = 'totp' LOG = log.getLogger(__name__) def _generate_totp_passcode(secret): """Generate TOTP passcode. :param bytes secret: A base32 encoded secret for the TOTP authentication :returns: totp passcode as bytes """ if isinstance(secret, six.text_type): # NOTE(dstanek): since this may be coming from the JSON stored in the # database it may be UTF-8 encoded secret = secret.encode('utf-8') # NOTE(nonameentername): cryptography takes a non base32 encoded value for # TOTP. 
Add the correct padding to be able to base32 decode while len(secret) % 8 != 0: secret = secret + b'=' decoded = base64.b32decode(secret) totp = crypto_totp.TOTP( decoded, 6, hashes.SHA1(), 30, backend=default_backend()) return totp.generate(time.time()) @dependency.requires('credential_api') class TOTP(auth.AuthMethodHandler): def authenticate(self, context, auth_payload, auth_context): """Try to authenticate using TOTP""" user_info = plugins.TOTPUserInfo.create(auth_payload, METHOD_NAME) auth_passcode = auth_payload.get('user').get('passcode') credentials = self.credential_api.list_credentials_for_user( user_info.user_id, type='totp') valid_passcode = False for credential in credentials: try: generated_passcode = _generate_totp_passcode( credential['blob']) if auth_passcode == generated_passcode: valid_passcode = True break except (ValueError, KeyError): LOG.debug('No TOTP match; credential id: %s, user_id: %s', credential['id'], user_info.user_id) except (TypeError): LOG.debug('Base32 decode failed for TOTP credential %s', credential['id']) if not valid_passcode: # authentication failed because of invalid username or passcode msg = _('Invalid username or TOTP passcode') raise exception.Unauthorized(msg) auth_context['user_id'] = user_info.user_id keystone-9.0.0/keystone/auth/plugins/mapped.py0000664000567000056710000002507112701407102022624 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools from pycadf import cadftaxonomy as taxonomy from six.moves.urllib import parse from keystone import auth from keystone.auth import plugins as auth_plugins from keystone.common import dependency from keystone import exception from keystone.federation import constants as federation_constants from keystone.federation import utils from keystone.i18n import _ from keystone.models import token_model from keystone import notifications METHOD_NAME = 'mapped' @dependency.requires('federation_api', 'identity_api', 'resource_api', 'token_provider_api') class Mapped(auth.AuthMethodHandler): def _get_token_ref(self, auth_payload): token_id = auth_payload['id'] response = self.token_provider_api.validate_token(token_id) return token_model.KeystoneToken(token_id=token_id, token_data=response) def authenticate(self, context, auth_payload, auth_context): """Authenticate mapped user and set an authentication context. :param context: keystone's request context :param auth_payload: the content of the authentication for a given method :param auth_context: user authentication context, a dictionary shared by all plugins. 
In addition to ``user_id`` in ``auth_context``, this plugin sets ``group_ids``, ``OS-FEDERATION:identity_provider`` and ``OS-FEDERATION:protocol`` """ if 'id' in auth_payload: token_ref = self._get_token_ref(auth_payload) handle_scoped_token(context, auth_payload, auth_context, token_ref, self.federation_api, self.identity_api, self.token_provider_api) else: handle_unscoped_token(context, auth_payload, auth_context, self.resource_api, self.federation_api, self.identity_api) def handle_scoped_token(context, auth_payload, auth_context, token_ref, federation_api, identity_api, token_provider_api): utils.validate_expiration(token_ref) token_audit_id = token_ref.audit_id identity_provider = token_ref.federation_idp_id protocol = token_ref.federation_protocol_id user_id = token_ref.user_id group_ids = token_ref.federation_group_ids send_notification = functools.partial( notifications.send_saml_audit_notification, 'authenticate', context, user_id, group_ids, identity_provider, protocol, token_audit_id) utils.assert_enabled_identity_provider(federation_api, identity_provider) try: mapping = federation_api.get_mapping_from_idp_and_protocol( identity_provider, protocol) utils.validate_groups(group_ids, mapping['id'], identity_api) except Exception: # NOTE(topol): Diaper defense to catch any exception, so we can # send off failed authentication notification, raise the exception # after sending the notification send_notification(taxonomy.OUTCOME_FAILURE) raise else: send_notification(taxonomy.OUTCOME_SUCCESS) auth_context['user_id'] = user_id auth_context['group_ids'] = group_ids auth_context[federation_constants.IDENTITY_PROVIDER] = identity_provider auth_context[federation_constants.PROTOCOL] = protocol def handle_unscoped_token(context, auth_payload, auth_context, resource_api, federation_api, identity_api): def is_ephemeral_user(mapped_properties): return mapped_properties['user']['type'] == utils.UserType.EPHEMERAL def build_ephemeral_user_context(auth_context, user, 
mapped_properties, identity_provider, protocol): auth_context['user_id'] = user['id'] auth_context['group_ids'] = mapped_properties['group_ids'] auth_context[federation_constants.IDENTITY_PROVIDER] = ( identity_provider) auth_context[federation_constants.PROTOCOL] = protocol def build_local_user_context(auth_context, mapped_properties): user_info = auth_plugins.UserAuthInfo.create(mapped_properties, METHOD_NAME) auth_context['user_id'] = user_info.user_id assertion = extract_assertion_data(context) identity_provider = auth_payload['identity_provider'] protocol = auth_payload['protocol'] utils.assert_enabled_identity_provider(federation_api, identity_provider) group_ids = None # NOTE(topol): The user is coming in from an IdP with a SAML assertion # instead of from a token, so we set token_id to None token_id = None # NOTE(marek-denis): This variable is set to None and there is a # possibility that it will be used in the CADF notification. This means # operation will not be mapped to any user (even ephemeral). 
user_id = None try: try: mapped_properties, mapping_id = apply_mapping_filter( identity_provider, protocol, assertion, resource_api, federation_api, identity_api) except exception.ValidationError as e: # if mapping is either invalid or yield no valid identity, # it is considered a failed authentication raise exception.Unauthorized(e) if is_ephemeral_user(mapped_properties): unique_id, display_name = ( get_user_unique_id_and_display_name(context, mapped_properties) ) user = identity_api.shadow_federated_user(identity_provider, protocol, unique_id, display_name) user_id = user['id'] group_ids = mapped_properties['group_ids'] utils.validate_groups_cardinality(group_ids, mapping_id) build_ephemeral_user_context(auth_context, user, mapped_properties, identity_provider, protocol) else: build_local_user_context(auth_context, mapped_properties) except Exception: # NOTE(topol): Diaper defense to catch any exception, so we can # send off failed authentication notification, raise the exception # after sending the notification outcome = taxonomy.OUTCOME_FAILURE notifications.send_saml_audit_notification('authenticate', context, user_id, group_ids, identity_provider, protocol, token_id, outcome) raise else: outcome = taxonomy.OUTCOME_SUCCESS notifications.send_saml_audit_notification('authenticate', context, user_id, group_ids, identity_provider, protocol, token_id, outcome) def extract_assertion_data(context): assertion = dict(utils.get_assertion_params_from_env(context)) return assertion def apply_mapping_filter(identity_provider, protocol, assertion, resource_api, federation_api, identity_api): idp = federation_api.get_idp(identity_provider) utils.validate_idp(idp, protocol, assertion) mapped_properties, mapping_id = federation_api.evaluate( identity_provider, protocol, assertion) # NOTE(marek-denis): We update group_ids only here to avoid fetching # groups identified by name/domain twice. 
# NOTE(marek-denis): Groups are translated from name/domain to their # corresponding ids in the auth plugin, as we need information what # ``mapping_id`` was used as well as idenity_api and resource_api # objects. group_ids = mapped_properties['group_ids'] utils.validate_groups_in_backend(group_ids, mapping_id, identity_api) group_ids.extend( utils.transform_to_group_ids( mapped_properties['group_names'], mapping_id, identity_api, resource_api)) mapped_properties['group_ids'] = list(set(group_ids)) return mapped_properties, mapping_id def get_user_unique_id_and_display_name(context, mapped_properties): """Setup federated username. Function covers all the cases for properly setting user id, a primary identifier for identity objects. Initial version of the mapping engine assumed user is identified by ``name`` and his ``id`` is built from the name. We, however need to be able to accept local rules that identify user by either id or name/domain. The following use-cases are covered: 1) If neither user_name nor user_id is set raise exception.Unauthorized 2) If user_id is set and user_name not, set user_name equal to user_id 3) If user_id is not set and user_name is, set user_id as url safe version of user_name. :param context: authentication context :param mapped_properties: Properties issued by a RuleProcessor. :type: dictionary :raises keystone.exception.Unauthorized: If neither `user_name` nor `user_id` is set. :returns: tuple with user identification :rtype: tuple """ user = mapped_properties['user'] user_id = user.get('id') user_name = user.get('name') or context['environment'].get('REMOTE_USER') if not any([user_id, user_name]): msg = _("Could not map user while setting ephemeral user identity. 
" "Either mapping rules must specify user id/name or " "REMOTE_USER environment variable must be set.") raise exception.Unauthorized(msg) elif not user_name: user['name'] = user_id elif not user_id: user_id = user_name user['id'] = parse.quote(user_id) return (user['id'], user['name']) keystone-9.0.0/keystone/auth/core.py0000664000567000056710000000707412701407102020630 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six from keystone import exception @six.add_metaclass(abc.ABCMeta) class AuthMethodHandler(object): """Abstract base class for an authentication plugin.""" def __init__(self): pass @abc.abstractmethod def authenticate(self, context, auth_payload, auth_context): """Authenticate user and return an authentication context. :param context: keystone's request context :param auth_payload: the content of the authentication for a given method :param auth_context: user authentication context, a dictionary shared by all plugins. It contains "method_names" and "extras" by default. "method_names" is a list and "extras" is a dictionary. If successful, plugin must set ``user_id`` in ``auth_context``. ``method_name`` is used to convey any additional authentication methods in case authentication is for re-scoping. For example, if the authentication is for re-scoping, plugin must append the previous method names into ``method_names``. 
Also, plugin may add any additional information into ``extras``. Anything in ``extras`` will be conveyed in the token's ``extras`` attribute. Here's an example of ``auth_context`` on successful authentication:: { "extras": {}, "methods": [ "password", "token" ], "user_id": "abc123" } Plugins are invoked in the order in which they are specified in the ``methods`` attribute of the ``identity`` object. For example, ``custom-plugin`` is invoked before ``password``, which is invoked before ``token`` in the following authentication request:: { "auth": { "identity": { "custom-plugin": { "custom-data": "sdfdfsfsfsdfsf" }, "methods": [ "custom-plugin", "password", "token" ], "password": { "user": { "id": "s23sfad1", "password": "secrete" } }, "token": { "id": "sdfafasdfsfasfasdfds" } } } } :returns: None if authentication is successful. Authentication payload in the form of a dictionary for the next authentication step if this is a multi step authentication. :raises keystone.exception.Unauthorized: for authentication failure """ raise exception.Unauthorized() keystone-9.0.0/keystone/auth/controllers.py0000664000567000056710000006702112701407102022244 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys from keystoneclient.common import cms from oslo_config import cfg from oslo_log import log from oslo_log import versionutils from oslo_serialization import jsonutils from oslo_utils import importutils import six import stevedore from keystone.common import config from keystone.common import controller from keystone.common import dependency from keystone.common import utils from keystone.common import wsgi from keystone import exception from keystone.federation import constants from keystone.i18n import _, _LI, _LW from keystone.resource import controllers as resource_controllers LOG = log.getLogger(__name__) CONF = cfg.CONF # registry of authentication methods AUTH_METHODS = {} AUTH_PLUGINS_LOADED = False def load_auth_method(method): plugin_name = CONF.auth.get(method) or 'default' namespace = 'keystone.auth.%s' % method try: driver_manager = stevedore.DriverManager(namespace, plugin_name, invoke_on_load=True) return driver_manager.driver except RuntimeError: LOG.debug('Failed to load the %s driver (%s) using stevedore, will ' 'attempt to load using import_object instead.', method, plugin_name) driver = importutils.import_object(plugin_name) msg = (_( 'Direct import of auth plugin %(name)r is deprecated as of Liberty in ' 'favor of its entrypoint from %(namespace)r and may be removed in ' 'N.') % {'name': plugin_name, 'namespace': namespace}) versionutils.report_deprecated_feature(LOG, msg) return driver def load_auth_methods(): global AUTH_PLUGINS_LOADED if AUTH_PLUGINS_LOADED: # Only try and load methods a single time. return # config.setup_authentication should be idempotent, call it to ensure we # have setup all the appropriate configuration options we may need. 
config.setup_authentication() for plugin in set(CONF.auth.methods): AUTH_METHODS[plugin] = load_auth_method(plugin) AUTH_PLUGINS_LOADED = True def get_auth_method(method_name): global AUTH_METHODS if method_name not in AUTH_METHODS: raise exception.AuthMethodNotSupported() return AUTH_METHODS[method_name] class AuthContext(dict): """Retrofitting auth_context to reconcile identity attributes. The identity attributes must not have conflicting values among the auth plug-ins. The only exception is `expires_at`, which is set to its earliest value. """ # identity attributes need to be reconciled among the auth plugins IDENTITY_ATTRIBUTES = frozenset(['user_id', 'project_id', 'access_token_id', 'domain_id', 'expires_at']) def __setitem__(self, key, val): if key in self.IDENTITY_ATTRIBUTES and key in self: existing_val = self[key] if key == 'expires_at': # special treatment for 'expires_at', we are going to take # the earliest expiration instead. if existing_val != val: LOG.info(_LI('"expires_at" has conflicting values ' '%(existing)s and %(new)s. 
Will use the ' 'earliest value.'), {'existing': existing_val, 'new': val}) if existing_val is None or val is None: val = existing_val or val else: val = min(existing_val, val) elif existing_val != val: msg = _('Unable to reconcile identity attribute %(attribute)s ' 'as it has conflicting values %(new)s and %(old)s') % ( {'attribute': key, 'new': val, 'old': existing_val}) raise exception.Unauthorized(msg) return super(AuthContext, self).__setitem__(key, val) @dependency.requires('resource_api', 'trust_api') class AuthInfo(object): """Encapsulation of "auth" request.""" @staticmethod def create(context, auth=None, scope_only=False): auth_info = AuthInfo(context, auth=auth) auth_info._validate_and_normalize_auth_data(scope_only) return auth_info def __init__(self, context, auth=None): self.context = context self.auth = auth self._scope_data = (None, None, None, None) # self._scope_data is (domain_id, project_id, trust_ref, unscoped) # project scope: (None, project_id, None, None) # domain scope: (domain_id, None, None, None) # trust scope: (None, None, trust_ref, None) # unscoped: (None, None, None, 'unscoped') def _assert_project_is_enabled(self, project_ref): # ensure the project is enabled try: self.resource_api.assert_project_enabled( project_id=project_ref['id'], project=project_ref) except AssertionError as e: LOG.warning(six.text_type(e)) six.reraise(exception.Unauthorized, exception.Unauthorized(e), sys.exc_info()[2]) def _assert_domain_is_enabled(self, domain_ref): try: self.resource_api.assert_domain_enabled( domain_id=domain_ref['id'], domain=domain_ref) except AssertionError as e: LOG.warning(six.text_type(e)) six.reraise(exception.Unauthorized, exception.Unauthorized(e), sys.exc_info()[2]) def _lookup_domain(self, domain_info): domain_id = domain_info.get('id') domain_name = domain_info.get('name') domain_ref = None if not domain_id and not domain_name: raise exception.ValidationError(attribute='id or name', target='domain') try: if domain_name: if 
(CONF.resource.domain_name_url_safe == 'strict' and utils.is_not_url_safe(domain_name)): msg = _('Domain name cannot contain reserved characters.') raise exception.Unauthorized(message=msg) domain_ref = self.resource_api.get_domain_by_name( domain_name) else: domain_ref = self.resource_api.get_domain(domain_id) except exception.DomainNotFound as e: LOG.exception(six.text_type(e)) raise exception.Unauthorized(e) self._assert_domain_is_enabled(domain_ref) return domain_ref def _lookup_project(self, project_info): project_id = project_info.get('id') project_name = project_info.get('name') project_ref = None if not project_id and not project_name: raise exception.ValidationError(attribute='id or name', target='project') try: if project_name: if (CONF.resource.project_name_url_safe == 'strict' and utils.is_not_url_safe(project_name)): msg = _('Project name cannot contain reserved characters.') raise exception.Unauthorized(message=msg) if 'domain' not in project_info: raise exception.ValidationError(attribute='domain', target='project') domain_ref = self._lookup_domain(project_info['domain']) project_ref = self.resource_api.get_project_by_name( project_name, domain_ref['id']) else: project_ref = self.resource_api.get_project(project_id) # NOTE(morganfainberg): The _lookup_domain method will raise # exception.Unauthorized if the domain isn't found or is # disabled. 
self._lookup_domain({'id': project_ref['domain_id']}) except exception.ProjectNotFound as e: raise exception.Unauthorized(e) self._assert_project_is_enabled(project_ref) return project_ref def _lookup_trust(self, trust_info): trust_id = trust_info.get('id') if not trust_id: raise exception.ValidationError(attribute='trust_id', target='trust') trust = self.trust_api.get_trust(trust_id) return trust def _validate_and_normalize_scope_data(self): """Validate and normalize scope data.""" if 'scope' not in self.auth: return if sum(['project' in self.auth['scope'], 'domain' in self.auth['scope'], 'unscoped' in self.auth['scope'], 'OS-TRUST:trust' in self.auth['scope']]) != 1: raise exception.ValidationError( attribute='project, domain, OS-TRUST:trust or unscoped', target='scope') if 'unscoped' in self.auth['scope']: self._scope_data = (None, None, None, 'unscoped') return if 'project' in self.auth['scope']: project_ref = self._lookup_project(self.auth['scope']['project']) self._scope_data = (None, project_ref['id'], None, None) elif 'domain' in self.auth['scope']: domain_ref = self._lookup_domain(self.auth['scope']['domain']) self._scope_data = (domain_ref['id'], None, None, None) elif 'OS-TRUST:trust' in self.auth['scope']: if not CONF.trust.enabled: raise exception.Forbidden('Trusts are disabled.') trust_ref = self._lookup_trust( self.auth['scope']['OS-TRUST:trust']) # TODO(ayoung): when trusts support domains, fill in domain data if trust_ref.get('project_id') is not None: project_ref = self._lookup_project( {'id': trust_ref['project_id']}) self._scope_data = (None, project_ref['id'], trust_ref, None) else: self._scope_data = (None, None, trust_ref, None) def _validate_auth_methods(self): if 'identity' not in self.auth: raise exception.ValidationError(attribute='identity', target='auth') # make sure auth methods are provided if 'methods' not in self.auth['identity']: raise exception.ValidationError(attribute='methods', target='identity') # make sure all the method 
data/payload are provided for method_name in self.get_method_names(): if method_name not in self.auth['identity']: raise exception.ValidationError(attribute=method_name, target='identity') # make sure auth method is supported for method_name in self.get_method_names(): if method_name not in AUTH_METHODS: raise exception.AuthMethodNotSupported() def _validate_and_normalize_auth_data(self, scope_only=False): """Make sure "auth" is valid. :param scope_only: If it is True, auth methods will not be validated but only the scope data. :type scope_only: boolean """ # make sure "auth" exist if not self.auth: raise exception.ValidationError(attribute='auth', target='request body') # NOTE(chioleong): Tokenless auth does not provide auth methods, # we only care about using this method to validate the scope # information. Therefore, validating the auth methods here is # insignificant and we can skip it when scope_only is set to # true. if scope_only is False: self._validate_auth_methods() self._validate_and_normalize_scope_data() def get_method_names(self): """Returns the identity method names. :returns: list of auth method names """ # Sanitizes methods received in request's body # Filters out duplicates, while keeping elements' order. method_names = [] for method in self.auth['identity']['methods']: if method not in method_names: method_names.append(method) return method_names def get_method_data(self, method): """Get the auth method payload. :returns: auth method payload """ if method not in self.auth['identity']['methods']: raise exception.ValidationError(attribute=method, target='identity') return self.auth['identity'][method] def get_scope(self): """Get scope information. Verify and return the scoping information. :returns: (domain_id, project_id, trust_ref, unscoped). If scope to a project, (None, project_id, None, None) will be returned. If scoped to a domain, (domain_id, None, None, None) will be returned. 
If scoped to a trust, (None, project_id, trust_ref, None), Will be returned, where the project_id comes from the trust definition. If unscoped, (None, None, None, 'unscoped') will be returned. """ return self._scope_data def set_scope(self, domain_id=None, project_id=None, trust=None, unscoped=None): """Set scope information.""" if domain_id and project_id: msg = _('Scoping to both domain and project is not allowed') raise ValueError(msg) if domain_id and trust: msg = _('Scoping to both domain and trust is not allowed') raise ValueError(msg) if project_id and trust: msg = _('Scoping to both project and trust is not allowed') raise ValueError(msg) self._scope_data = (domain_id, project_id, trust, unscoped) @dependency.requires('assignment_api', 'catalog_api', 'identity_api', 'resource_api', 'token_provider_api', 'trust_api') class Auth(controller.V3Controller): # Note(atiwari): From V3 auth controller code we are # calling protection() wrappers, so we need to setup # the member_name and collection_name attributes of # auth controller code. # In the absence of these attributes, default 'entity' # string will be used to represent the target which is # generic. Policy can be defined using 'entity' but it # would not reflect the exact entity that is in context. # We are defining collection_name = 'tokens' and # member_name = 'token' to facilitate policy decisions. 
collection_name = 'tokens' member_name = 'token' def __init__(self, *args, **kw): super(Auth, self).__init__(*args, **kw) config.setup_authentication() def authenticate_for_token(self, context, auth=None): """Authenticate user and issue a token.""" include_catalog = 'nocatalog' not in context['query_string'] try: auth_info = AuthInfo.create(context, auth=auth) auth_context = AuthContext(extras={}, method_names=[], bind={}) self.authenticate(context, auth_info, auth_context) if auth_context.get('access_token_id'): auth_info.set_scope(None, auth_context['project_id'], None) self._check_and_set_default_scoping(auth_info, auth_context) (domain_id, project_id, trust, unscoped) = auth_info.get_scope() method_names = auth_info.get_method_names() method_names += auth_context.get('method_names', []) # make sure the list is unique method_names = list(set(method_names)) expires_at = auth_context.get('expires_at') # NOTE(morganfainberg): define this here so it is clear what the # argument is during the issue_v3_token provider call. metadata_ref = None token_audit_id = auth_context.get('audit_id') (token_id, token_data) = self.token_provider_api.issue_v3_token( auth_context['user_id'], method_names, expires_at, project_id, domain_id, auth_context, trust, metadata_ref, include_catalog, parent_audit_id=token_audit_id) # NOTE(wanghong): We consume a trust use only when we are using # trusts and have successfully issued a token. 
if trust: self.trust_api.consume_use(trust['id']) return render_token_data_response(token_id, token_data, created=True) except exception.TrustNotFound as e: raise exception.Unauthorized(e) def _check_and_set_default_scoping(self, auth_info, auth_context): (domain_id, project_id, trust, unscoped) = auth_info.get_scope() if trust: project_id = trust['project_id'] if domain_id or project_id or trust: # scope is specified return # Skip scoping when unscoped federated token is being issued if constants.IDENTITY_PROVIDER in auth_context: return # Do not scope if request is for explicitly unscoped token if unscoped is not None: return # fill in default_project_id if it is available try: user_ref = self.identity_api.get_user(auth_context['user_id']) except exception.UserNotFound as e: LOG.exception(six.text_type(e)) raise exception.Unauthorized(e) default_project_id = user_ref.get('default_project_id') if not default_project_id: # User has no default project. He shall get an unscoped token. return # make sure user's default project is legit before scoping to it try: default_project_ref = self.resource_api.get_project( default_project_id) default_project_domain_ref = self.resource_api.get_domain( default_project_ref['domain_id']) if (default_project_ref.get('enabled', True) and default_project_domain_ref.get('enabled', True)): if self.assignment_api.get_roles_for_user_and_project( user_ref['id'], default_project_id): auth_info.set_scope(project_id=default_project_id) else: msg = _LW("User %(user_id)s doesn't have access to" " default project %(project_id)s. The token" " will be unscoped rather than scoped to the" " project.") LOG.warning(msg, {'user_id': user_ref['id'], 'project_id': default_project_id}) else: msg = _LW("User %(user_id)s's default project %(project_id)s" " is disabled. 
The token will be unscoped rather" " than scoped to the project.") LOG.warning(msg, {'user_id': user_ref['id'], 'project_id': default_project_id}) except (exception.ProjectNotFound, exception.DomainNotFound): # default project or default project domain doesn't exist, # will issue unscoped token instead msg = _LW("User %(user_id)s's default project %(project_id)s not" " found. The token will be unscoped rather than" " scoped to the project.") LOG.warning(msg, {'user_id': user_ref['id'], 'project_id': default_project_id}) def authenticate(self, context, auth_info, auth_context): """Authenticate user.""" # The 'external' method allows any 'REMOTE_USER' based authentication # In some cases the server can set REMOTE_USER as '' instead of # dropping it, so this must be filtered out if context['environment'].get('REMOTE_USER'): try: external = get_auth_method('external') external.authenticate(context, auth_info, auth_context) except exception.AuthMethodNotSupported: # This will happen there is no 'external' plugin registered # and the container is performing authentication. # The 'kerberos' and 'saml' methods will be used this way. # In those cases, it is correct to not register an # 'external' plugin; if there is both an 'external' and a # 'kerberos' plugin, it would run the check on identity twice. 
LOG.debug("No 'external' plugin is registered.") except exception.Unauthorized: # If external fails then continue and attempt to determine # user identity using remaining auth methods LOG.debug("Authorization failed for 'external' auth method.") # need to aggregate the results in case two or more methods # are specified auth_response = {'methods': []} for method_name in auth_info.get_method_names(): method = get_auth_method(method_name) resp = method.authenticate(context, auth_info.get_method_data(method_name), auth_context) if resp: auth_response['methods'].append(method_name) auth_response[method_name] = resp if auth_response["methods"]: # authentication continuation required raise exception.AdditionalAuthRequired(auth_response) if 'user_id' not in auth_context: msg = _('User not found') raise exception.Unauthorized(msg) @controller.protected() def check_token(self, context): token_id = context.get('subject_token_id') token_data = self.token_provider_api.validate_v3_token( token_id) # NOTE(morganfainberg): The code in # ``keystone.common.wsgi.render_response`` will remove the content # body. 
return render_token_data_response(token_id, token_data) @controller.protected() def revoke_token(self, context): token_id = context.get('subject_token_id') return self.token_provider_api.revoke_token(token_id) @controller.protected() def validate_token(self, context): token_id = context.get('subject_token_id') include_catalog = 'nocatalog' not in context['query_string'] token_data = self.token_provider_api.validate_v3_token( token_id) if not include_catalog and 'catalog' in token_data['token']: del token_data['token']['catalog'] return render_token_data_response(token_id, token_data) @controller.protected() def revocation_list(self, context, auth=None): if not CONF.token.revoke_by_id: raise exception.Gone() audit_id_only = ('audit_id_only' in context['query_string']) tokens = self.token_provider_api.list_revoked_tokens() for t in tokens: expires = t['expires'] if not (expires and isinstance(expires, six.text_type)): t['expires'] = utils.isotime(expires) if audit_id_only: t.pop('id', None) data = {'revoked': tokens} if audit_id_only: # No need to obfuscate if no token IDs. return data json_data = jsonutils.dumps(data) signed_text = cms.cms_sign_text(json_data, CONF.signing.certfile, CONF.signing.keyfile) return {'signed': signed_text} def _combine_lists_uniquely(self, a, b): # it's most likely that only one of these will be filled so avoid # the combination if possible. 
if a and b: return {x['id']: x for x in a + b}.values() else: return a or b @controller.protected() def get_auth_projects(self, context): auth_context = self.get_auth_context(context) user_id = auth_context.get('user_id') user_refs = [] if user_id: try: user_refs = self.assignment_api.list_projects_for_user(user_id) except exception.UserNotFound: # nosec # federated users have an id but they don't link to anything pass group_ids = auth_context.get('group_ids') grp_refs = [] if group_ids: grp_refs = self.assignment_api.list_projects_for_groups(group_ids) refs = self._combine_lists_uniquely(user_refs, grp_refs) return resource_controllers.ProjectV3.wrap_collection(context, refs) @controller.protected() def get_auth_domains(self, context): auth_context = self.get_auth_context(context) user_id = auth_context.get('user_id') user_refs = [] if user_id: try: user_refs = self.assignment_api.list_domains_for_user(user_id) except exception.UserNotFound: # nosec # federated users have an id but they don't link to anything pass group_ids = auth_context.get('group_ids') grp_refs = [] if group_ids: grp_refs = self.assignment_api.list_domains_for_groups(group_ids) refs = self._combine_lists_uniquely(user_refs, grp_refs) return resource_controllers.DomainV3.wrap_collection(context, refs) @controller.protected() def get_auth_catalog(self, context): auth_context = self.get_auth_context(context) user_id = auth_context.get('user_id') project_id = auth_context.get('project_id') if not project_id: raise exception.Forbidden( _('A project-scoped token is required to produce a service ' 'catalog.')) # The V3Controller base methods mostly assume that you're returning # either a collection or a single element from a collection, neither of # which apply to the catalog. Because this is a special case, this # re-implements a tiny bit of work done by the base controller (such as # self-referential link building) to avoid overriding or refactoring # several private methods. 
return { 'catalog': self.catalog_api.get_v3_catalog(user_id, project_id), 'links': {'self': self.base_url(context, path='auth/catalog')} } # FIXME(gyee): not sure if it belongs here or keystone.common. Park it here # for now. def render_token_data_response(token_id, token_data, created=False): """Render token data HTTP response. Stash token ID into the X-Subject-Token header. """ headers = [('X-Subject-Token', token_id)] if created: status = (201, 'Created') else: status = (200, 'OK') return wsgi.render_response(body=token_data, status=status, headers=headers) keystone-9.0.0/keystone/auth/routers.py0000664000567000056710000000403512701407102021375 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.auth import controllers from keystone.common import json_home from keystone.common import wsgi class Routers(wsgi.RoutersBase): def append_v3_routers(self, mapper, routers): auth_controller = controllers.Auth() self._add_resource( mapper, auth_controller, path='/auth/tokens', get_action='validate_token', head_action='check_token', post_action='authenticate_for_token', delete_action='revoke_token', rel=json_home.build_v3_resource_relation('auth_tokens')) self._add_resource( mapper, auth_controller, path='/auth/tokens/OS-PKI/revoked', get_action='revocation_list', rel=json_home.build_v3_extension_resource_relation( 'OS-PKI', '1.0', 'revocations')) self._add_resource( mapper, auth_controller, path='/auth/catalog', get_action='get_auth_catalog', rel=json_home.build_v3_resource_relation('auth_catalog')) self._add_resource( mapper, auth_controller, path='/auth/projects', get_action='get_auth_projects', rel=json_home.build_v3_resource_relation('auth_projects')) self._add_resource( mapper, auth_controller, path='/auth/domains', get_action='get_auth_domains', rel=json_home.build_v3_resource_relation('auth_domains')) keystone-9.0.0/examples/0000775000567000056710000000000012701407246016343 5ustar jenkinsjenkins00000000000000keystone-9.0.0/examples/pki/0000775000567000056710000000000012701407246017126 5ustar jenkinsjenkins00000000000000keystone-9.0.0/examples/pki/gen_pki.sh0000775000567000056710000001402112701407102021066 0ustar jenkinsjenkins00000000000000#!/bin/bash # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # This script generates the crypto necessary for the SSL tests. DIR=`dirname "$0"` CURRENT_DIR=`cd "$DIR" && pwd` CERTS_DIR=$CURRENT_DIR/certs PRIVATE_DIR=$CURRENT_DIR/private CMS_DIR=$CURRENT_DIR/cms function rm_old { rm -rf $CERTS_DIR/*.pem rm -rf $PRIVATE_DIR/*.pem } function cleanup { rm -rf *.conf > /dev/null 2>&1 rm -rf index* > /dev/null 2>&1 rm -rf *.crt > /dev/null 2>&1 rm -rf newcerts > /dev/null 2>&1 rm -rf *.pem > /dev/null 2>&1 rm -rf serial* > /dev/null 2>&1 } function generate_ca_conf { echo ' [ req ] default_bits = 2048 default_keyfile = cakey.pem default_md = default prompt = no distinguished_name = ca_distinguished_name x509_extensions = ca_extensions [ ca_distinguished_name ] serialNumber = 5 countryName = US stateOrProvinceName = CA localityName = Sunnyvale organizationName = OpenStack organizationalUnitName = Keystone emailAddress = keystone@openstack.org commonName = Self Signed [ ca_extensions ] basicConstraints = critical,CA:true ' > ca.conf } function generate_ssl_req_conf { echo ' [ req ] default_bits = 2048 default_keyfile = keystonekey.pem default_md = default prompt = no distinguished_name = distinguished_name [ distinguished_name ] countryName = US stateOrProvinceName = CA localityName = Sunnyvale organizationName = OpenStack organizationalUnitName = Keystone commonName = localhost emailAddress = keystone@openstack.org ' > ssl_req.conf } function generate_cms_signing_req_conf { echo ' [ req ] default_bits = 2048 default_keyfile = keystonekey.pem default_md = default prompt = no distinguished_name = distinguished_name [ distinguished_name ] countryName = US stateOrProvinceName = CA localityName = Sunnyvale organizationName = OpenStack organizationalUnitName = Keystone commonName = Keystone emailAddress = keystone@openstack.org ' > cms_signing_req.conf } function generate_signing_conf { echo ' [ ca ] default_ca = signing_ca [ signing_ca 
] dir = . database = $dir/index.txt new_certs_dir = $dir/newcerts certificate = $dir/certs/cacert.pem serial = $dir/serial private_key = $dir/private/cakey.pem default_days = 21360 default_crl_days = 30 default_md = default policy = policy_any [ policy_any ] countryName = supplied stateOrProvinceName = supplied localityName = optional organizationName = supplied organizationalUnitName = supplied emailAddress = supplied commonName = supplied ' > signing.conf } function setup { touch index.txt echo '10' > serial generate_ca_conf mkdir newcerts } function check_error { if [ $1 != 0 ] ; then echo "Failed! rc=${1}" echo 'Bailing ...' cleanup exit $1 else echo 'Done' fi } function generate_ca { echo 'Generating New CA Certificate ...' openssl req -x509 -newkey rsa:2048 -days 21360 -out $CERTS_DIR/cacert.pem \ -keyout $PRIVATE_DIR/cakey.pem -outform PEM -config ca.conf -nodes check_error $? } function ssl_cert_req { echo 'Generating SSL Certificate Request ...' generate_ssl_req_conf openssl req -newkey rsa:2048 -keyout $PRIVATE_DIR/ssl_key.pem \ -keyform PEM -out ssl_req.pem -outform PEM -config ssl_req.conf -nodes check_error $? #openssl req -in req.pem -text -noout } function cms_signing_cert_req { echo 'Generating CMS Signing Certificate Request ...' generate_cms_signing_req_conf openssl req -newkey rsa:2048 -keyout $PRIVATE_DIR/signing_key.pem \ -keyform PEM -out cms_signing_req.pem -outform PEM \ -config cms_signing_req.conf -nodes check_error $? #openssl req -in req.pem -text -noout } function issue_certs { generate_signing_conf echo 'Issuing SSL Certificate ...' openssl ca -in ssl_req.pem -config signing.conf -batch check_error $? openssl x509 -in $CURRENT_DIR/newcerts/10.pem -out $CERTS_DIR/ssl_cert.pem check_error $? echo 'Issuing CMS Signing Certificate ...' openssl ca -in cms_signing_req.pem -config signing.conf -batch check_error $? openssl x509 -in $CURRENT_DIR/newcerts/11.pem \ -out $CERTS_DIR/signing_cert.pem check_error $? 
} function create_middleware_cert { cp $CERTS_DIR/ssl_cert.pem $CERTS_DIR/middleware.pem cat $PRIVATE_DIR/ssl_key.pem >> $CERTS_DIR/middleware.pem } function check_openssl { echo 'Checking openssl availability ...' which openssl check_error $? } function gen_sample_cms { FILES="${CMS_DIR}/auth_token_revoked.json" FILES+=" ${CMS_DIR}/auth_token_unscoped.json" FILES+=" ${CMS_DIR}/auth_token_scoped.json" FILES+=" ${CMS_DIR}/revocation_list.json" for json_file in $FILES; do openssl cms -sign -in $json_file -nosmimecap \ -signer $CERTS_DIR/signing_cert.pem \ -inkey $PRIVATE_DIR/signing_key.pem -outform PEM -nodetach \ -nocerts -noattr -out ${json_file/.json/.pem} done } check_openssl rm_old cleanup setup generate_ca ssl_cert_req cms_signing_cert_req issue_certs create_middleware_cert gen_sample_cms cleanup keystone-9.0.0/examples/pki/private/0000775000567000056710000000000012701407246020600 5ustar jenkinsjenkins00000000000000keystone-9.0.0/examples/pki/private/cakey.pem0000664000567000056710000000325012701407102022366 0ustar jenkinsjenkins00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCh1U+N3g2cjFi7 GeVf21FIv8MDhughFCey9rysAuqFONSFYo2rectLgpDtVy4BFFUFlxmh8Ci9TEZ5 LiA31tbc4584GxvlLt4dg8aFsUJRBKq0L9i7W5v9uFpHrY1Zr+P4vwG+v7IWOuzw 19f517eGpp6LLcj2vrpN9Yb63rrydKOqr0KJodMd+vFKmi+euFcPqs6sw1OiC5Dp JN479CGl2Fs1WzMoKDedRNiXG7ysrVrYQIkfMBABBPIwilq1xXZz9Ybo0PbNgOu6 xpSsy9hq+IzxcwYsr5CwIcbqW6Ju+Ti2iBEaff20lW7dFzO4kwrcqOr9Jnn7qE8Y fJo9Hyj3AgMBAAECggEAPeEVaTaF190mNGyDczKmEv4X8CpOag+N2nVT0SXQTJ5d TJ9RckbAwB+tkMLr+Uev9tI+39e3jCI1NDK56QAB6jYy9D4RXYGdNoXji80qgVYa e4lsAr/Vlp8+DfhDew6xSbSnUytzSeLAJJsznvmn2Bmvt6ILHKXzEMoYEabGrtvk 0n31mmd6sszW6i1cYEhr3gK/VXaO4gM1oWit9aeIJDg3/D3UNUW7aoCTeCz91Gif 87/JH3UIPEIt960jb3oV7ltajRSpiSOfefJFwz/2n09+/P/Sg1+SWAraqkqaLqhO zoslYSYUuOQv+j97iD/tDVBjiWR1TrzQjf/3noOl+QKBgQDTExaIe0YYI8KdBNZ6 1cG3vztNWDh0PaP1n0n/bJYAGmAfxfn/gSrABXfeIAjy01f76EK2lPa/i8+DR7vL 
dJnUMO10OxaIZKr+OtR1XrMM6kREj6H5yHTNz0sJ3hDEfwJ1BndqwrXlCLAe7upe veXI9LVfPjPVmf8t9UwyxtaNiwKBgQDERzCGEuyKIeSfgytcdknJ0W+AbdkshC92 tZQPbI35YOLac2/y7GMjjf5Xg5VJRIYwXAG8ha+61Tvd7+qCVdzNyYfyOoBEE69B Gc9UdpXRfIjxokfidqh7mIIfjFNSI/UyVmvL9wrregXPcM+s7OlLC/0O82gOcNxU GKF3oP5XxQKBgQCPZEZIjcZ+m7yYQzMZ26FwnL9Cug4QGdgLAx2YIkJ8624l568A ftV2AcD+67Boll8NSSoZM3W1htuAifjwLNRcLKkD7yhNnGX1tC2lVqI4weWC1jjp od6H+q01lOC7PLWEntH9ey1q3M4ZFaGunz89l9CnVXCNScLri9sqG56iJQKBgHOc 50UiInhe7HbU4ZauClq5Za9FhRXGqtqGrDbFn38UBavdMUTq3p6Txgwwcp/coBoe J9uu90razU+2QPESuGPy4IPa17DB04pKNKiwzSC+9T83cpY/hJCAzazdkDqi+Yv0 Abz7wE/h6Ug+T+WxCt3sqtvCnjlbWzyh4YJAr3BtAoGBAIibPCEfVOwOfMOXkhIb liRVVGNxXQa6MwGVVfyR9gmlM85IjcBjh+Tf5+v3Mo286OlzLXQjfYW5pXR5Mgaw bKe+z5AqJlOsA+lJGTyCNnPKwaXAYHt8dZ41WhgzekibHCx7EQ+8jH1jkz2Gwou6 MDbnRu+e0FCyRFSuhB9Cim/K -----END PRIVATE KEY----- keystone-9.0.0/examples/pki/private/ssl_key.pem0000664000567000056710000000325012701407102022743 0ustar jenkinsjenkins00000000000000-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC5dpW18l3bs+Mc j/JdhaAa+qw1RJwShm06g+q38ZoCcCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4G SI1pZa3iqbT9Yj70nxN+0l94iym+v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6Bd mwS0FuOy2qfKPnPhyBDH2VawtOgYMLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69 KBJQElFXPQ9Nu0ABCPWWC2tN87L5pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQu RnkMvQ/g887Sp6nEJ22ABPEFhuRr89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cT nV9Dv6bfAgMBAAECggEBAIB1K5L/kZUITulMptGyKUgmkjq/D98g7u0Vy/CmTkcc Cx6F+LGsL9D8mfplDBKOpo4S530sfKk1+Uwu2ovDGqKhazQJ5ZMnz6gK7Ieg1ERD wDDURTIeyKf0HtJMGD0av2QU+GIeYXQEO446PhLCu+n42zkQ8tDS8xSJbCsu0odV ok6+i7nEg9sP4uDfAAtM8CUJbRpFTha+m2a7pOz3ylU7/ZV4FDIgJ+FEynaphXAo bZE4MX5I7A4DDBp7/9g9HsgefByY4xiABuk7Rsyztyf2TrJEtcsVhiV4sCIIHsow u60KGEcTQWj4npBIMgW1QUdrwmAAh/35gOjt9ZndgTkCgYEA2yT5DmihjVaNF65B 8VtdFcpESr8rr6FBmJ7z31m7MufeV1Inc5GqCK9agRmpr5sTYcgFB9it2IhW2WsA xHv+7J04bd9DBtgTv58GWrISsCR/abMZnJrm+F5Rafk77jwjCx/SwFj79ybI83Ia VJYMd7jqkxc00+DZT/3QWZqRrlsCgYEA2KeBBqUVdCpwNiJpgFM18HWjJx36HRk7 
YoFapXot/6R6A/rYmS+/goBZt2CWqqGtnXqWEZvH+v4L+WlUmYQrWwtoxpdR1oXz EmlCxN7D9MbRVR7QVW24h5zdwPOlbCTGoKzowOs8UEjMfQ81zoMinLmcJgHQSyzs OawgSF+DmM0CgYBQz26EELNaMktvKxQoE3/c9CyAv8Q1TKqqxBq8BxPP7s7/tkzU AigIcdlW+Aapue7IxQCN5yocShJ0tE+hJPRZfpR7d7P4xx9pLxQhx766c4sEiEXu iPSZK/artHuUG1r01DRcN7QabJP3qeDpxjcswuTFfu49H5IjPD5jfGsyNwKBgFjh bvdQ5lo/xsUOnQV+HZTGTeaQT7l8TnZ85rkYRKKp0TysvgsqIYDiMuwd/fGGXnlK fyI+LG51pmftpD1OkZLKPXOrRHGjhjK5aCDn2rAimGI5P/KsDpXj7r1ntyeEdtAX 32y1lIrDMtDjWomcFqkBJGQbPl540Xhfeub1+EDJAoGAUZGPT2itKnxEFsa1SKHW yLeEsag/a9imAVyizo1WJn2WJaUhi1aHK49w6JRowIAzXXb7zLQt7BL8v+ydPVw3 eySpXGqFuN/Prm3So0SeWllWcPsKFAzjgE0CWjNuB0GlAZGOaJOcWUNoOZjX/SDC FpolIoaSad28tGc8tbEk3fU= -----END PRIVATE KEY----- keystone-9.0.0/examples/pki/private/signing_key.pem0000664000567000056710000000325012701407102023600 0ustar jenkinsjenkins00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDEwuiHTXfQoNQ7 IXK0+YEVURd+pxJo0gPUFnYpOwfduTyu9FOBeo+Kc/+SS+6ZSKP/KyeIyc/XHBO5 tIgPiLgAbRIRMF5Bva4+OzstCeGcgxkoditQZe/DwPc8V0s8rFE0owSnQIdvXT2G yWO3IGSdLgtwLX1XHmIgDiIteEnRXmdC2Sw1wbi2qlJkjK5isCfcADDgm/42wT/f 92HHdBmI5b60gVOAam/PzR2rMjuA6TzevDgKMg+a+Y1LVfEGTdN1IyLKLsfHtJay +vjFbSaNDn1r5Uq5c0uRykq8mPrqqkBLsbWSqSTNjFfObo743PHg0goYdrIYQ4wX ptxSJRylAgMBAAECggEBAIDQPVz/CXarI+ZGQotaYPisqx3+kN3QyDLcNaVOgRrW P3UmfVjh/QEeae3ECkONu9e8z9gMjyX7uqo0F3NcBWI6Bb79FGgjnuQc8OPOeUZ2 yUyk+DxdT/eu5+04FQh2o387TjuU0lXFDBem1sI30cbZMyHQliMnwAPOXO+5tVH8 PusGNBMVvoCyfnj52uVjmAjPqLXyOMcKEhuJFbhnUURKvzkHRf43SWQsb081eh2m ACQ7uNzX7vg3aPXxSZXY2+hHX67POdqosjddu6CfoXcEHAOAUujvTOFvd1gGRkRo uOi5hNQqcN5uaqeq9enVThINDyFMzngZBhMCzRTWeK0CgYEA4qUhB7lJZLt9niDW 4Fudda1Pzu3XfxHsSG4D+xx5LunKb3ChG5x7PSLJvusfvnkm5fqhEEhbSVARo6Vn AAA52u5SPDDNwyk1ttvBR/Fc7eGwpbRQry2I6ui6baKiIOSV2K3vJlsSK8/GMQqu j0fstJuSvQR7Y6NUYxlWi+VNussCgYEA3j7tFAdGFc5JkeTHSzsU4h2+17uVDSSi yr7Duc9+9fwAbsO4go9x1CAOvV2r0WX10jPsTGg1d31pWLvJrS6QsAffmM+A0QIT eBX+umcavXWy69VExWa0xKU9wTE/nQvX9Fr8A+Klh/WfMcvoomK2zgOKoRSmes04 
WKYlHWsSaE8CgYBUYcZ6abG5n1SVmwRlY7asKWqdUE/7L2EZVlyFEYTMwp5r/zL8 ZLY9fMZAHqoi8FhbJ4Tv2wChuv3WP66pgWwI5tIXNtRk5OLqwcakUmiW6IAsMYYY sotXam5+gx55wKFJmvh+/0k0ppbTi3aSQeUPGRz44sJNxnGUs8pVK3pVIQKBgQDD ga+lEtEAlbv6b7sx3wN79pbPyOBR84yRtkcPygzx74Gh7uL9V5rW9GyDAUgIqR0a kTqp7HI8b0KhIHFFu9TkRcjY8JFtS9o8pXy0FcdcK5H+DFq3HKag5ovwy5YeXTDY cMGJ2XOsqtIkSDCZySTvDgaBtVzOYoHS2jWEL5C92QKBgGmL2juXIB+HAi7UuKPg nWkVTikt5Zr2GNgYtso75E7+ljaRuf4D9eEBiOD1qYKQm8KvsiVzEs71BSmT1p1C b2hlM/5Crb7KumIkHTARQFr5NPwuBZ6NA6RLnd++vKi0WgOJtDAlR3bgwugfQdzZ 4Isaq9Rgfa/EHCKB2weQ7c3r -----END PRIVATE KEY----- keystone-9.0.0/examples/pki/cms/0000775000567000056710000000000012701407246017710 5ustar jenkinsjenkins00000000000000keystone-9.0.0/examples/pki/cms/auth_token_unscoped.json0000664000567000056710000000105112701407102024630 0ustar jenkinsjenkins00000000000000{ "access": { "token": { "expires": "2012-08-17T15:35:34Z", "id": "01e032c996ef4406b144335915a41e79" }, "serviceCatalog": {}, "user": { "username": "user_name1", "roles_links": [], "id": "c9c89e3be3ee453fbf00c7966f6d3fbd", "roles": [ { "name": "role1" }, { "name": "role2" } ], "name": "user_name1" } } } keystone-9.0.0/examples/pki/cms/revocation_list.pem0000664000567000056710000000153312701407102023610 0ustar jenkinsjenkins00000000000000-----BEGIN CMS----- MIICWgYJKoZIhvcNAQcCoIICSzCCAkcCAQExCTAHBgUrDgMCGjBpBgkqhkiG9w0B BwGgXARaeyJyZXZva2VkIjpbeyJpZCI6IjdhY2ZjZmRhZjZhMTRhZWJlOTdjNjFj NTk0N2JjNGQzIiwiZXhwaXJlcyI6IjIwMTItMDgtMTRUMTc6NTg6NDhaIn1dfQ0K MYIByjCCAcYCAQEwgaQwgZ4xCjAIBgNVBAUTATUxCzAJBgNVBAYTAlVTMQswCQYD VQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3RhY2sx ETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBvcGVu c3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZAIBETAHBgUrDgMCGjANBgkq hkiG9w0BAQEFAASCAQC2f05VHM7zjNT3TBO80AmZ00n7AEWUjbFe5nqIM8kWGM83 01Bi3uU/nQ0daAd3tqCmDL2EfETAjD+xnIzjlN6eIA74Vy51wFD/KiyWYPWzw8mH WcATHmE4E8kLdt8NhUodCY9TCFxcHJNDR1Eai/U7hH+5O4p9HcmMjv/GWegZL6HB 
Up9Cxu6haxvPFmYylzM6Qt0Ad/WiO/JZLPTA4qXJEJSa9EMFMb0c2wSDSn30swJe 7J79VTFktTr2djv8KFvaHr4vLFYv2Y3ZkTeHqam0m91vllxLZJUP5QTSHjjY6LFE 5eEjIlOv9wOOm1uTtPIq6pxCugU1Wm7gstkqr55R -----END CMS----- keystone-9.0.0/examples/pki/cms/auth_token_scoped.json0000664000567000056710000000545412701407102024300 0ustar jenkinsjenkins00000000000000{ "access": { "serviceCatalog": [ { "endpoints": [ { "adminURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a", "region": "RegionOne", "internalURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a", "publicURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a" } ], "endpoints_links": [], "type": "volume", "name": "volume" }, { "endpoints": [ { "adminURL": "http://127.0.0.1:9292/v1", "region": "RegionOne", "internalURL": "http://127.0.0.1:9292/v1", "publicURL": "http://127.0.0.1:9292/v1" } ], "endpoints_links": [], "type": "image", "name": "glance" }, { "endpoints": [ { "adminURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a", "region": "RegionOne", "internalURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a", "publicURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a" } ], "endpoints_links": [], "type": "compute", "name": "nova" }, { "endpoints": [ { "adminURL": "http://127.0.0.1:35357/v2.0", "region": "RegionOne", "internalURL": "http://127.0.0.1:35357/v2.0", "publicURL": "http://127.0.0.1:5000/v2.0" } ], "endpoints_links": [], "type": "identity", "name": "keystone" } ], "token": { "expires": "2012-06-02T14:47:34Z", "id": "placeholder", "tenant": { "enabled": true, "description": null, "name": "tenant_name1", "id": "tenant_id1" } }, "user": { "username": "user_name1", "roles_links": [ "role1", "role2" ], "id": "user_id1", "roles": [ { "name": "role1" }, { "name": "role2" } ], "name": "user_name1" } } } keystone-9.0.0/examples/pki/cms/auth_token_scoped.pem0000664000567000056710000000525412701407102024106 0ustar jenkinsjenkins00000000000000-----BEGIN CMS----- 
MIIHwQYJKoZIhvcNAQcCoIIHsjCCB64CAQExCTAHBgUrDgMCGjCCBc4GCSqGSIb3 DQEHAaCCBb8EggW7eyJhY2Nlc3MiOiB7InNlcnZpY2VDYXRhbG9nIjogW3siZW5k cG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo4Nzc2L3Yx LzY0YjZmM2ZiY2M1MzQzNWU4YTYwZmNmODliYjY2MTdhIiwgInJlZ2lvbiI6ICJy ZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo4Nzc2 L3YxLzY0YjZmM2ZiY2M1MzQzNWU4YTYwZmNmODliYjY2MTdhIiwgInB1YmxpY1VS TCI6ICJodHRwOi8vMTI3LjAuMC4xOjg3NzYvdjEvNjRiNmYzZmJjYzUzNDM1ZThh NjBmY2Y4OWJiNjYxN2EifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUi OiAidm9sdW1lIiwgIm5hbWUiOiAidm9sdW1lIn0sIHsiZW5kcG9pbnRzIjogW3si YWRtaW5VUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo5MjkyL3YxIiwgInJlZ2lvbiI6 ICJyZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo5 MjkyL3YxIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTI3LjAuMC4xOjkyOTIvdjEi fV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiaW1hZ2UiLCAibmFt ZSI6ICJnbGFuY2UifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRw Oi8vMTI3LjAuMC4xOjg3NzQvdjEuMS82NGI2ZjNmYmNjNTM0MzVlOGE2MGZjZjg5 YmI2NjE3YSIsICJyZWdpb24iOiAicmVnaW9uT25lIiwgImludGVybmFsVVJMIjog Imh0dHA6Ly8xMjcuMC4wLjE6ODc3NC92MS4xLzY0YjZmM2ZiY2M1MzQzNWU4YTYw ZmNmODliYjY2MTdhIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTI3LjAuMC4xOjg3 NzQvdjEuMS82NGI2ZjNmYmNjNTM0MzVlOGE2MGZjZjg5YmI2NjE3YSJ9XSwgImVu ZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJjb21wdXRlIiwgIm5hbWUiOiAi bm92YSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xMjcu MC4wLjE6MzUzNTcvdjIuMCIsICJyZWdpb24iOiAiUmVnaW9uT25lIiwgImludGVy bmFsVVJMIjogImh0dHA6Ly8xMjcuMC4wLjE6MzUzNTcvdjIuMCIsICJwdWJsaWNV UkwiOiAiaHR0cDovLzEyNy4wLjAuMTo1MDAwL3YyLjAifV0sICJlbmRwb2ludHNf bGlua3MiOiBbXSwgInR5cGUiOiAiaWRlbnRpdHkiLCAibmFtZSI6ICJrZXlzdG9u ZSJ9XSwidG9rZW4iOiB7ImV4cGlyZXMiOiAiMjAxMi0wNi0wMlQxNDo0NzozNFoi LCAiaWQiOiAicGxhY2Vob2xkZXIiLCAidGVuYW50IjogeyJlbmFibGVkIjogdHJ1 ZSwgImRlc2NyaXB0aW9uIjogbnVsbCwgIm5hbWUiOiAidGVuYW50X25hbWUxIiwg ImlkIjogInRlbmFudF9pZDEifX0sICJ1c2VyIjogeyJ1c2VybmFtZSI6ICJ1c2Vy X25hbWUxIiwgInJvbGVzX2xpbmtzIjogWyJyb2xlMSIsInJvbGUyIl0sICJpZCI6 
ICJ1c2VyX2lkMSIsICJyb2xlcyI6IFt7Im5hbWUiOiAicm9sZTEifSwgeyJuYW1l IjogInJvbGUyIn1dLCAibmFtZSI6ICJ1c2VyX25hbWUxIn19fQ0KMYIByjCCAcYC AQEwgaQwgZ4xCjAIBgNVBAUTATUxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTES MBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3RhY2sxETAPBgNVBAsT CEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBvcGVuc3RhY2sub3Jn MRQwEgYDVQQDEwtTZWxmIFNpZ25lZAIBETAHBgUrDgMCGjANBgkqhkiG9w0BAQEF AASCAQCAtuVtqTU9h1uaRrYU1eusSnHwD6jizp/xltTrYTyFPfYjhJdglS+bjSeS Iau9pN3Tfug98ozUTJ5ByNepAQtxBxPz5bDXhBmAbU6ywaolqRAG+b/s2ShNGQ2a tn80NeZmDNbtoqdHVAkD3EZXjsEKr2w+3JTTF2indzczyGe5EeSfNUaT+ZhNEmPR Urob62t8atW+zehCSurpaa8pC5m1NcbK8Uu6Y+qO2m08KU9w5kmbOQtWAGCmtpIx F2yM1AbSgd90yzen7dv5mNkgZyzQ6SYgRUvkKOKnCyBb97EZK3ZR4qUxQzRYM++8 g8HdaIfoYVPoPHqODet8Xmhw/Wtp -----END CMS----- keystone-9.0.0/examples/pki/cms/auth_token_revoked.json0000664000567000056710000000550212701407102024454 0ustar jenkinsjenkins00000000000000{ "access": { "serviceCatalog": [ { "endpoints": [ { "adminURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a", "region": "RegionOne", "internalURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a", "publicURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a" } ], "endpoints_links": [], "type": "volume", "name": "volume" }, { "endpoints": [ { "adminURL": "http://127.0.0.1:9292/v1", "region": "RegionOne", "internalURL": "http://127.0.0.1:9292/v1", "publicURL": "http://127.0.0.1:9292/v1" } ], "endpoints_links": [], "type": "image", "name": "glance" }, { "endpoints": [ { "adminURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a", "region": "RegionOne", "internalURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a", "publicURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a" } ], "endpoints_links": [], "type": "compute", "name": "nova" }, { "endpoints": [ { "adminURL": "http://127.0.0.1:35357/v2.0", "region": "RegionOne", "internalURL": "http://127.0.0.1:35357/v2.0", "publicURL": 
"http://127.0.0.1:5000/v2.0" } ], "endpoints_links": [], "type": "identity", "name": "keystone" } ], "token": { "expires": "2012-06-02T14:47:34Z", "id": "placeholder", "tenant": { "enabled": true, "description": null, "name": "tenant_name1", "id": "tenant_id1" } }, "user": { "username": "revoked_username1", "roles_links": [ "role1", "role2" ], "id": "revoked_user_id1", "roles": [ { "name": "role1" }, { "name": "role2" } ], "name": "revoked_username1" } } } keystone-9.0.0/examples/pki/cms/auth_token_unscoped.pem0000664000567000056710000000216312701407102024445 0ustar jenkinsjenkins00000000000000-----BEGIN CMS----- MIIDKAYJKoZIhvcNAQcCoIIDGTCCAxUCAQExCTAHBgUrDgMCGjCCATUGCSqGSIb3 DQEHAaCCASYEggEieyJhY2Nlc3MiOiB7InRva2VuIjogeyJleHBpcmVzIjogIjIw MTItMDgtMTdUMTU6MzU6MzRaIiwgImlkIjogIjAxZTAzMmM5OTZlZjQ0MDZiMTQ0 MzM1OTE1YTQxZTc5In0sICJzZXJ2aWNlQ2F0YWxvZyI6IHt9LCAidXNlciI6IHsi dXNlcm5hbWUiOiAidXNlcl9uYW1lMSIsICJyb2xlc19saW5rcyI6IFtdLCAiaWQi OiAiYzljODllM2JlM2VlNDUzZmJmMDBjNzk2NmY2ZDNmYmQiLCAicm9sZXMiOiBb eyduYW1lJzogJ3JvbGUxJ30seyduYW1lJzogJ3JvbGUyJ30sXSwgIm5hbWUiOiAi dXNlcl9uYW1lMSJ9fX0xggHKMIIBxgIBATCBpDCBnjEKMAgGA1UEBRMBNTELMAkG A1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTdW5ueXZhbGUxEjAQBgNV BAoTCU9wZW5TdGFjazERMA8GA1UECxMIS2V5c3RvbmUxJTAjBgkqhkiG9w0BCQEW FmtleXN0b25lQG9wZW5zdGFjay5vcmcxFDASBgNVBAMTC1NlbGYgU2lnbmVkAgER MAcGBSsOAwIaMA0GCSqGSIb3DQEBAQUABIIBAFyD9IH2bXsafCTyHEWS28zBuq03 ZNWXV4+0BfdMbX1ONkaQ7mLGRmfabLHwfE5RaSASFh/Doq7KTc8XrBVfTm9HQPGr TLZUawdYlyBFVq0PEE1cPvO9Blz4X/2Awcp/Q67YRd/oLCY2dFWMClMroXu1fy3P oFlpWPPhURrbU1GjhUgPIz0IxNGjfWEHVsb5kz7Bo4E8J3pgIkccm97XZZtiCwf7 DVNj+Eb5mRegGG6IgSSRpZULgnCmSofQ3RnW3jSCkDxLXDQm9IsaaLJsuUFLylGs mB/98w9mP192IGl5MVr8/tANXwb5ok2VatUp/Ww1U0IlWbhN374PbK76vcE= -----END CMS----- keystone-9.0.0/examples/pki/cms/auth_token_revoked.pem0000664000567000056710000000531412701407102024265 0ustar jenkinsjenkins00000000000000-----BEGIN CMS----- MIIH1wYJKoZIhvcNAQcCoIIHyDCCB8QCAQExCTAHBgUrDgMCGjCCBeQGCSqGSIb3 
DQEHAaCCBdUEggXReyJhY2Nlc3MiOiB7InNlcnZpY2VDYXRhbG9nIjogW3siZW5k cG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo4Nzc2L3Yx LzY0YjZmM2ZiY2M1MzQzNWU4YTYwZmNmODliYjY2MTdhIiwgInJlZ2lvbiI6ICJy ZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo4Nzc2 L3YxLzY0YjZmM2ZiY2M1MzQzNWU4YTYwZmNmODliYjY2MTdhIiwgInB1YmxpY1VS TCI6ICJodHRwOi8vMTI3LjAuMC4xOjg3NzYvdjEvNjRiNmYzZmJjYzUzNDM1ZThh NjBmY2Y4OWJiNjYxN2EifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUi OiAidm9sdW1lIiwgIm5hbWUiOiAidm9sdW1lIn0sIHsiZW5kcG9pbnRzIjogW3si YWRtaW5VUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo5MjkyL3YxIiwgInJlZ2lvbiI6 ICJyZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo5 MjkyL3YxIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTI3LjAuMC4xOjkyOTIvdjEi fV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiaW1hZ2UiLCAibmFt ZSI6ICJnbGFuY2UifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRw Oi8vMTI3LjAuMC4xOjg3NzQvdjEuMS82NGI2ZjNmYmNjNTM0MzVlOGE2MGZjZjg5 YmI2NjE3YSIsICJyZWdpb24iOiAicmVnaW9uT25lIiwgImludGVybmFsVVJMIjog Imh0dHA6Ly8xMjcuMC4wLjE6ODc3NC92MS4xLzY0YjZmM2ZiY2M1MzQzNWU4YTYw ZmNmODliYjY2MTdhIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTI3LjAuMC4xOjg3 NzQvdjEuMS82NGI2ZjNmYmNjNTM0MzVlOGE2MGZjZjg5YmI2NjE3YSJ9XSwgImVu ZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJjb21wdXRlIiwgIm5hbWUiOiAi bm92YSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xMjcu MC4wLjE6MzUzNTcvdjIuMCIsICJyZWdpb24iOiAiUmVnaW9uT25lIiwgImludGVy bmFsVVJMIjogImh0dHA6Ly8xMjcuMC4wLjE6MzUzNTcvdjIuMCIsICJwdWJsaWNV UkwiOiAiaHR0cDovLzEyNy4wLjAuMTo1MDAwL3YyLjAifV0sICJlbmRwb2ludHNf bGlua3MiOiBbXSwgInR5cGUiOiAiaWRlbnRpdHkiLCAibmFtZSI6ICJrZXlzdG9u ZSJ9XSwidG9rZW4iOiB7ImV4cGlyZXMiOiAiMjAxMi0wNi0wMlQxNDo0NzozNFoi LCAiaWQiOiAicGxhY2Vob2xkZXIiLCAidGVuYW50IjogeyJlbmFibGVkIjogdHJ1 ZSwgImRlc2NyaXB0aW9uIjogbnVsbCwgIm5hbWUiOiAidGVuYW50X25hbWUxIiwg ImlkIjogInRlbmFudF9pZDEifX0sICJ1c2VyIjogeyJ1c2VybmFtZSI6ICJyZXZv a2VkX3VzZXJuYW1lMSIsICJyb2xlc19saW5rcyI6IFsicm9sZTEiLCJyb2xlMiJd LCAiaWQiOiAicmV2b2tlZF91c2VyX2lkMSIsICJyb2xlcyI6IFt7Im5hbWUiOiAi 
cm9sZTEifSwgeyJuYW1lIjogInJvbGUyIn1dLCAibmFtZSI6ICJyZXZva2VkX3Vz ZXJuYW1lMSJ9fX0NCjGCAcowggHGAgEBMIGkMIGeMQowCAYDVQQFEwE1MQswCQYD VQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1bm55dmFsZTESMBAGA1UE ChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTElMCMGCSqGSIb3DQEJARYW a2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxMLU2VsZiBTaWduZWQCAREw BwYFKw4DAhowDQYJKoZIhvcNAQEBBQAEggEAXY8JvllpyctcNlJByPLxhgLyRfFo Ew+8Yq3O4FxOyfVkINvOz4EHTipY0M/K8OLwfxpRt7o/iGLGRDBTI6Dd+erXsus8 NecnNxcWN9RUE2CZhoGj/0nhnNEGF+9Mlv3tMBngwoUJg2paSw/Vn2Q7RaqbOC05 aZOSDoSX7Zf0DIS/T0ZPnmOUb9+N25M20ctMHksPMEq0qyf2oove0O+WMa/cA8JT c2EAhew4WSD0Zv0GOAP30GS+hkNfA1GZTrvCQrpRs9jXhK4dR2bBsnUFVix1BEZ0 sDhI8cXLvm16IpOO8ov6002ZoZhPn6Qo+0J8QOfdnjiwNnxLOEbuOIwPeQ== -----END CMS----- keystone-9.0.0/examples/pki/cms/revocation_list.json0000664000567000056710000000022312701407102023773 0ustar jenkinsjenkins00000000000000{ "revoked": [ { "id": "7acfcfdaf6a14aebe97c61c5947bc4d3", "expires": "2012-08-14T17:58:48Z" } ] } keystone-9.0.0/examples/pki/certs/0000775000567000056710000000000012701407246020246 5ustar jenkinsjenkins00000000000000keystone-9.0.0/examples/pki/certs/ssl_cert.pem0000664000567000056710000000245612701407102022565 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIIDpjCCAo4CARAwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNV BAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQK EwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZr ZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0x MzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgZAxCzAJBgNVBAYTAlVTMQsw CQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3Rh Y2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBv cGVuc3RhY2sub3JnMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB AQUAA4IBDwAwggEKAoIBAQC5dpW18l3bs+Mcj/JdhaAa+qw1RJwShm06g+q38ZoC cCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4GSI1pZa3iqbT9Yj70nxN+0l94iym+ v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6BdmwS0FuOy2qfKPnPhyBDH2VawtOgY 
MLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69KBJQElFXPQ9Nu0ABCPWWC2tN87L5 pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQuRnkMvQ/g887Sp6nEJ22ABPEFhuRr 89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cTnV9Dv6bfAgMBAAEwDQYJKoZIhvcN AQEFBQADggEBAIVz3ZwxSUF/y5ABmjnVIQaVVxH97bu07smFQUe0AB2I9R4xnBJ9 jn93DpeixZvArCZuDuJEJvNER8S6L3r/OPMPrVzayxibXATaZRE8khMWEJpsnyeW 8paA5NuZJwN2NjlPOmT47J1m7ZjLgkrVwjhwQZPMnh5kG9690TBJNhg9x3Z8f6p3 iKj2AfZWGhp9Xr2xOZCpfvAZmyvKOMeuHVrRZ2VWGuzojQd7fjSEDw/+Tg8Gw1LV BQXjXiKQHsD1YID2a9Pe9yrBjO00ZMxMw8+wN9qrh+8vxfmwTO8tEkmcpvM4ivO3 /oGGhQh6nSncERVI7rx+wBDnIHKBz6MU2Ow= -----END CERTIFICATE----- keystone-9.0.0/examples/pki/certs/cacert.pem0000664000567000056710000000255712701407102022212 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIID1jCCAr6gAwIBAgIJAKiIU3dYUGKeMA0GCSqGSIb3DQEBBQUAMIGeMQowCAYD VQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1bm55 dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTElMCMG CSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxMLU2Vs ZiBTaWduZWQwIBcNMTMwNzA5MTYyNTAwWhgPMjA3MjAxMDExNjI1MDBaMIGeMQow CAYDVQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1 bm55dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTEl MCMGCSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxML U2VsZiBTaWduZWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCh1U+N 3g2cjFi7GeVf21FIv8MDhughFCey9rysAuqFONSFYo2rectLgpDtVy4BFFUFlxmh 8Ci9TEZ5LiA31tbc4584GxvlLt4dg8aFsUJRBKq0L9i7W5v9uFpHrY1Zr+P4vwG+ v7IWOuzw19f517eGpp6LLcj2vrpN9Yb63rrydKOqr0KJodMd+vFKmi+euFcPqs6s w1OiC5DpJN479CGl2Fs1WzMoKDedRNiXG7ysrVrYQIkfMBABBPIwilq1xXZz9Ybo 0PbNgOu6xpSsy9hq+IzxcwYsr5CwIcbqW6Ju+Ti2iBEaff20lW7dFzO4kwrcqOr9 Jnn7qE8YfJo9Hyj3AgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN AQEFBQADggEBAGWFTQTe2FwvwGWa/Bx3Ypc8pJ05ucmGDm8XZiUHj1mOvFHTcveL Iofb+vR2lynr+MwF9Dn1szGteVNn/QxrHJIoxsgf1n/9fdyYqjoKWXblNBMt5jhr IlMGdQMqHSDzlkZKbcXg5vzHnG5mrwh0rojcZItZznXTSo/XnujEtHwIvCo6rk9c tRRzpkcDkg+/SZf2izchsLoEQVsJsIZMnWl0hUGFHaDfx2JQn7bnAcC84wPVhRJ+ 
Xa3kDok1r7Nd7Vr/Wf0hCNRxyv2dySD/bq5iCEl1HNik3KCq4eUicTtkGe5N+Was ucf1RhPD3oZbxlTX4QDN7grSCdrTESyuhfc= -----END CERTIFICATE----- keystone-9.0.0/examples/pki/certs/signing_cert.pem0000664000567000056710000000245612701407102023422 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIIDpTCCAo0CAREwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNV BAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQK EwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZr ZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0x MzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgY8xCzAJBgNVBAYTAlVTMQsw CQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3Rh Y2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBv cGVuc3RhY2sub3JnMREwDwYDVQQDEwhLZXlzdG9uZTCCASIwDQYJKoZIhvcNAQEB BQADggEPADCCAQoCggEBAMTC6IdNd9Cg1DshcrT5gRVRF36nEmjSA9QWdik7B925 PK70U4F6j4pz/5JL7plIo/8rJ4jJz9ccE7m0iA+IuABtEhEwXkG9rj47Oy0J4ZyD GSh2K1Bl78PA9zxXSzysUTSjBKdAh29dPYbJY7cgZJ0uC3AtfVceYiAOIi14SdFe Z0LZLDXBuLaqUmSMrmKwJ9wAMOCb/jbBP9/3Ycd0GYjlvrSBU4Bqb8/NHasyO4Dp PN68OAoyD5r5jUtV8QZN03UjIsoux8e0lrL6+MVtJo0OfWvlSrlzS5HKSryY+uqq QEuxtZKpJM2MV85ujvjc8eDSChh2shhDjBem3FIlHKUCAwEAATANBgkqhkiG9w0B AQUFAAOCAQEAed9fHgdJrk+gZcO5gsqq6uURfDOuYD66GsSdZw4BqHjYAcnyWq2d a+iw7Uxkqu7iLf2k4+Hu3xjDFrce479OwZkSnbXmqB7XspTGOuM8MgT7jB/ypKTO Z6qaZKSWK1Hta995hMrVVlhUNBLh0MPGqoVWYA4d7mblujgH9vp+4mpCciJagHks 8K5FBmI+pobB+uFdSYDoRzX9LTpStspK4e3IoY8baILuGcdKimRNBv6ItG4hMrnt Ae1/nWMJyUu5rDTGf2V/vAaS0S/faJBwQSz1o38QHMTWHNspfwIdX3yMqI9u7/vY lz3rLy5WdBdUgZrZ3/VLmJTiJVZu5Owq4Q== -----END CERTIFICATE----- keystone-9.0.0/examples/pki/certs/middleware.pem0000664000567000056710000000572612701407102023067 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIIDpjCCAo4CARAwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNV BAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQK EwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZr 
ZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0x MzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgZAxCzAJBgNVBAYTAlVTMQsw CQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3Rh Y2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBv cGVuc3RhY2sub3JnMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB AQUAA4IBDwAwggEKAoIBAQC5dpW18l3bs+Mcj/JdhaAa+qw1RJwShm06g+q38ZoC cCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4GSI1pZa3iqbT9Yj70nxN+0l94iym+ v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6BdmwS0FuOy2qfKPnPhyBDH2VawtOgY MLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69KBJQElFXPQ9Nu0ABCPWWC2tN87L5 pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQuRnkMvQ/g887Sp6nEJ22ABPEFhuRr 89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cTnV9Dv6bfAgMBAAEwDQYJKoZIhvcN AQEFBQADggEBAIVz3ZwxSUF/y5ABmjnVIQaVVxH97bu07smFQUe0AB2I9R4xnBJ9 jn93DpeixZvArCZuDuJEJvNER8S6L3r/OPMPrVzayxibXATaZRE8khMWEJpsnyeW 8paA5NuZJwN2NjlPOmT47J1m7ZjLgkrVwjhwQZPMnh5kG9690TBJNhg9x3Z8f6p3 iKj2AfZWGhp9Xr2xOZCpfvAZmyvKOMeuHVrRZ2VWGuzojQd7fjSEDw/+Tg8Gw1LV BQXjXiKQHsD1YID2a9Pe9yrBjO00ZMxMw8+wN9qrh+8vxfmwTO8tEkmcpvM4ivO3 /oGGhQh6nSncERVI7rx+wBDnIHKBz6MU2Ow= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC5dpW18l3bs+Mc j/JdhaAa+qw1RJwShm06g+q38ZoCcCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4G SI1pZa3iqbT9Yj70nxN+0l94iym+v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6Bd mwS0FuOy2qfKPnPhyBDH2VawtOgYMLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69 KBJQElFXPQ9Nu0ABCPWWC2tN87L5pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQu RnkMvQ/g887Sp6nEJ22ABPEFhuRr89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cT nV9Dv6bfAgMBAAECggEBAIB1K5L/kZUITulMptGyKUgmkjq/D98g7u0Vy/CmTkcc Cx6F+LGsL9D8mfplDBKOpo4S530sfKk1+Uwu2ovDGqKhazQJ5ZMnz6gK7Ieg1ERD wDDURTIeyKf0HtJMGD0av2QU+GIeYXQEO446PhLCu+n42zkQ8tDS8xSJbCsu0odV ok6+i7nEg9sP4uDfAAtM8CUJbRpFTha+m2a7pOz3ylU7/ZV4FDIgJ+FEynaphXAo bZE4MX5I7A4DDBp7/9g9HsgefByY4xiABuk7Rsyztyf2TrJEtcsVhiV4sCIIHsow u60KGEcTQWj4npBIMgW1QUdrwmAAh/35gOjt9ZndgTkCgYEA2yT5DmihjVaNF65B 8VtdFcpESr8rr6FBmJ7z31m7MufeV1Inc5GqCK9agRmpr5sTYcgFB9it2IhW2WsA 
xHv+7J04bd9DBtgTv58GWrISsCR/abMZnJrm+F5Rafk77jwjCx/SwFj79ybI83Ia VJYMd7jqkxc00+DZT/3QWZqRrlsCgYEA2KeBBqUVdCpwNiJpgFM18HWjJx36HRk7 YoFapXot/6R6A/rYmS+/goBZt2CWqqGtnXqWEZvH+v4L+WlUmYQrWwtoxpdR1oXz EmlCxN7D9MbRVR7QVW24h5zdwPOlbCTGoKzowOs8UEjMfQ81zoMinLmcJgHQSyzs OawgSF+DmM0CgYBQz26EELNaMktvKxQoE3/c9CyAv8Q1TKqqxBq8BxPP7s7/tkzU AigIcdlW+Aapue7IxQCN5yocShJ0tE+hJPRZfpR7d7P4xx9pLxQhx766c4sEiEXu iPSZK/artHuUG1r01DRcN7QabJP3qeDpxjcswuTFfu49H5IjPD5jfGsyNwKBgFjh bvdQ5lo/xsUOnQV+HZTGTeaQT7l8TnZ85rkYRKKp0TysvgsqIYDiMuwd/fGGXnlK fyI+LG51pmftpD1OkZLKPXOrRHGjhjK5aCDn2rAimGI5P/KsDpXj7r1ntyeEdtAX 32y1lIrDMtDjWomcFqkBJGQbPl540Xhfeub1+EDJAoGAUZGPT2itKnxEFsa1SKHW yLeEsag/a9imAVyizo1WJn2WJaUhi1aHK49w6JRowIAzXXb7zLQt7BL8v+ydPVw3 eySpXGqFuN/Prm3So0SeWllWcPsKFAzjgE0CWjNuB0GlAZGOaJOcWUNoOZjX/SDC FpolIoaSad28tGc8tbEk3fU= -----END PRIVATE KEY----- keystone-9.0.0/tests-py3-blacklist.txt0000664000567000056710000000060112701407105021076 0ustar jenkinsjenkins00000000000000keystone.tests.unit.common.test_ldap keystone.tests.unit.common.test_notifications keystone.tests.unit.test_backend_ldap keystone.tests.unit.test_backend_ldap_pool keystone.tests.unit.test_v2 keystone.tests.unit.test_v3_auth keystone.tests.unit.test_v3_credential keystone.tests.unit.test_v3_federation keystone.tests.unit.test_v3_oauth1 keystone.tests.unit.token.test_fernet_provider keystone-9.0.0/keystone.egg-info/0000775000567000056710000000000012701407246020060 5ustar jenkinsjenkins00000000000000keystone-9.0.0/keystone.egg-info/requires.txt0000664000567000056710000000170312701407245022460 0ustar jenkinsjenkins00000000000000pbr>=1.6 WebOb>=1.2.3 eventlet!=0.18.3,>=0.18.2 greenlet>=0.3.2 PasteDeploy>=1.5.0 Paste cryptography>=1.0 six>=1.9.0 SQLAlchemy<1.1.0,>=1.0.10 sqlalchemy-migrate>=0.9.6 stevedore>=1.5.0 passlib>=1.6 python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 keystonemiddleware!=4.1.0,>=4.0.0 oslo.cache>=1.5.0 oslo.concurrency>=3.5.0 oslo.config>=3.7.0 oslo.context>=0.2.0 oslo.messaging>=4.0.0 oslo.db>=4.1.0 
oslo.i18n>=2.1.0 oslo.log>=1.14.0 oslo.middleware>=3.0.0 oslo.policy>=0.5.0 oslo.serialization>=1.10.0 oslo.service>=1.0.0 oslo.utils>=3.5.0 oauthlib>=0.6 pysaml2<4.0.3,>=2.4.0 dogpile.cache>=0.5.7 jsonschema!=2.5.0,<3.0.0,>=2.0.0 pycadf!=2.0.0,>=1.1.0 msgpack-python>=0.4.0 [:(python_version!='2.7')] Routes!=2.0,>=1.12.3 [:(python_version=='2.7')] Routes!=2.0,!=2.1,>=1.12.3 [bandit] bandit>=0.17.3 # Apache-2.0 [ldap:(python_version=='2.7')] python-ldap>=2.4 ldappool>=1.0 [memcache] python-memcached>=1.56 # PSF [mongodb] pymongo!=3.1,>=3.0.2 # Apache-2.0 keystone-9.0.0/keystone.egg-info/dependency_links.txt0000664000567000056710000000000112701407245024125 0ustar jenkinsjenkins00000000000000 keystone-9.0.0/keystone.egg-info/not-zip-safe0000664000567000056710000000000112701407237022306 0ustar jenkinsjenkins00000000000000 keystone-9.0.0/keystone.egg-info/SOURCES.txt0000664000567000056710000007030712701407246021753 0ustar jenkinsjenkins00000000000000.coveragerc .mailmap .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MANIFEST.in README.rst babel.cfg requirements.txt setup.cfg setup.py test-requirements.txt tests-py3-blacklist.txt tox.ini config-generator/keystone.conf doc/Makefile doc/README.rst doc/source/apache-httpd.rst doc/source/api_curl_examples.rst doc/source/architecture.rst doc/source/auth-totp.rst doc/source/community.rst doc/source/conf.py doc/source/configuration.rst doc/source/configure_federation.rst doc/source/configure_tokenless_x509.rst doc/source/configuringservices.rst doc/source/developing.rst doc/source/developing_drivers.rst doc/source/event_notifications.rst doc/source/extensions.rst doc/source/external-auth.rst doc/source/http-api.rst doc/source/index.rst doc/source/installing.rst doc/source/key_terms.rst doc/source/mapping_combinations.rst doc/source/mapping_schema.rst doc/source/middlewarearchitecture.rst doc/source/online_schema_migration_examples.rst doc/source/policy_mapping.rst doc/source/sample_config.rst 
doc/source/services.rst doc/source/devref/development.environment.rst doc/source/federation/mellon.rst doc/source/federation/openidc.rst doc/source/federation/shibboleth.rst doc/source/federation/websso.rst doc/source/man/keystone-all.rst doc/source/man/keystone-manage.rst etc/default_catalog.templates etc/keystone-paste.ini etc/keystone.conf.sample etc/logging.conf.sample etc/policy.json etc/policy.v3cloudsample.json etc/sso_callback_template.html examples/pki/gen_pki.sh examples/pki/certs/cacert.pem examples/pki/certs/middleware.pem examples/pki/certs/signing_cert.pem examples/pki/certs/ssl_cert.pem examples/pki/cms/auth_token_revoked.json examples/pki/cms/auth_token_revoked.pem examples/pki/cms/auth_token_scoped.json examples/pki/cms/auth_token_scoped.pem examples/pki/cms/auth_token_unscoped.json examples/pki/cms/auth_token_unscoped.pem examples/pki/cms/revocation_list.json examples/pki/cms/revocation_list.pem examples/pki/private/cakey.pem examples/pki/private/signing_key.pem examples/pki/private/ssl_key.pem httpd/README httpd/keystone-uwsgi-admin.ini httpd/keystone-uwsgi-public.ini httpd/keystone.py httpd/uwsgi-keystone.conf httpd/wsgi-keystone.conf keystone/__init__.py keystone/exception.py keystone/i18n.py keystone/notifications.py keystone/service.py keystone.egg-info/PKG-INFO keystone.egg-info/SOURCES.txt keystone.egg-info/dependency_links.txt keystone.egg-info/entry_points.txt keystone.egg-info/not-zip-safe keystone.egg-info/pbr.json keystone.egg-info/requires.txt keystone.egg-info/top_level.txt keystone/assignment/__init__.py keystone/assignment/controllers.py keystone/assignment/core.py keystone/assignment/routers.py keystone/assignment/schema.py keystone/assignment/V8_backends/__init__.py keystone/assignment/V8_backends/sql.py keystone/assignment/V8_role_backends/__init__.py keystone/assignment/V8_role_backends/sql.py keystone/assignment/backends/__init__.py keystone/assignment/backends/sql.py keystone/assignment/role_backends/__init__.py 
keystone/assignment/role_backends/sql.py keystone/auth/__init__.py keystone/auth/controllers.py keystone/auth/core.py keystone/auth/routers.py keystone/auth/plugins/__init__.py keystone/auth/plugins/core.py keystone/auth/plugins/external.py keystone/auth/plugins/mapped.py keystone/auth/plugins/oauth1.py keystone/auth/plugins/password.py keystone/auth/plugins/saml2.py keystone/auth/plugins/token.py keystone/auth/plugins/totp.py keystone/catalog/__init__.py keystone/catalog/controllers.py keystone/catalog/core.py keystone/catalog/routers.py keystone/catalog/schema.py keystone/catalog/backends/__init__.py keystone/catalog/backends/sql.py keystone/catalog/backends/templated.py keystone/cmd/__init__.py keystone/cmd/all.py keystone/cmd/cli.py keystone/cmd/manage.py keystone/common/__init__.py keystone/common/authorization.py keystone/common/clean.py keystone/common/config.py keystone/common/controller.py keystone/common/dependency.py keystone/common/driver_hints.py keystone/common/extension.py keystone/common/json_home.py keystone/common/manager.py keystone/common/models.py keystone/common/openssl.py keystone/common/router.py keystone/common/tokenless_auth.py keystone/common/utils.py keystone/common/wsgi.py keystone/common/cache/__init__.py keystone/common/cache/_context_cache.py keystone/common/cache/core.py keystone/common/cache/backends/__init__.py keystone/common/cache/backends/memcache_pool.py keystone/common/cache/backends/mongo.py keystone/common/cache/backends/noop.py keystone/common/environment/__init__.py keystone/common/environment/eventlet_server.py keystone/common/kvs/__init__.py keystone/common/kvs/core.py keystone/common/kvs/backends/__init__.py keystone/common/kvs/backends/inmemdb.py keystone/common/kvs/backends/memcached.py keystone/common/ldap/__init__.py keystone/common/ldap/core.py keystone/common/sql/__init__.py keystone/common/sql/core.py keystone/common/sql/migration_helpers.py keystone/common/sql/migrate_repo/README 
keystone/common/sql/migrate_repo/__init__.py keystone/common/sql/migrate_repo/manage.py keystone/common/sql/migrate_repo/migrate.cfg keystone/common/sql/migrate_repo/versions/067_kilo.py keystone/common/sql/migrate_repo/versions/068_placeholder.py keystone/common/sql/migrate_repo/versions/069_placeholder.py keystone/common/sql/migrate_repo/versions/070_placeholder.py keystone/common/sql/migrate_repo/versions/071_placeholder.py keystone/common/sql/migrate_repo/versions/072_placeholder.py keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py keystone/common/sql/migrate_repo/versions/075_confirm_config_registration.py keystone/common/sql/migrate_repo/versions/076_placeholder.py keystone/common/sql/migrate_repo/versions/077_placeholder.py keystone/common/sql/migrate_repo/versions/078_placeholder.py keystone/common/sql/migrate_repo/versions/079_placeholder.py keystone/common/sql/migrate_repo/versions/080_placeholder.py keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py keystone/common/sql/migrate_repo/versions/087_implied_roles.py keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py 
keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py keystone/common/sql/migrate_repo/versions/__init__.py keystone/common/validation/__init__.py keystone/common/validation/parameter_types.py keystone/common/validation/validators.py keystone/contrib/__init__.py keystone/contrib/admin_crud/__init__.py keystone/contrib/admin_crud/core.py keystone/contrib/ec2/__init__.py keystone/contrib/ec2/controllers.py keystone/contrib/ec2/core.py keystone/contrib/ec2/routers.py keystone/contrib/endpoint_filter/__init__.py keystone/contrib/endpoint_filter/routers.py keystone/contrib/endpoint_filter/backends/__init__.py keystone/contrib/endpoint_filter/backends/catalog_sql.py keystone/contrib/endpoint_filter/backends/sql.py keystone/contrib/endpoint_filter/migrate_repo/__init__.py keystone/contrib/endpoint_filter/migrate_repo/migrate.cfg keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py keystone/contrib/endpoint_filter/migrate_repo/versions/__init__.py keystone/contrib/endpoint_policy/__init__.py keystone/contrib/endpoint_policy/routers.py keystone/contrib/endpoint_policy/backends/__init__.py keystone/contrib/endpoint_policy/backends/sql.py keystone/contrib/endpoint_policy/migrate_repo/__init__.py keystone/contrib/endpoint_policy/migrate_repo/migrate.cfg keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py keystone/contrib/endpoint_policy/migrate_repo/versions/__init__.py keystone/contrib/federation/__init__.py keystone/contrib/federation/routers.py keystone/contrib/federation/backends/__init__.py keystone/contrib/federation/backends/sql.py 
keystone/contrib/federation/migrate_repo/__init__.py keystone/contrib/federation/migrate_repo/migrate.cfg keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py keystone/contrib/federation/migrate_repo/versions/007_add_remote_id_table.py keystone/contrib/federation/migrate_repo/versions/008_add_relay_state_to_sp.py keystone/contrib/federation/migrate_repo/versions/__init__.py keystone/contrib/oauth1/__init__.py keystone/contrib/oauth1/routers.py keystone/contrib/oauth1/backends/__init__.py keystone/contrib/oauth1/backends/sql.py keystone/contrib/oauth1/migrate_repo/__init__.py keystone/contrib/oauth1/migrate_repo/migrate.cfg keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py keystone/contrib/oauth1/migrate_repo/versions/__init__.py keystone/contrib/revoke/__init__.py keystone/contrib/revoke/routers.py keystone/contrib/revoke/backends/__init__.py keystone/contrib/revoke/backends/sql.py keystone/contrib/revoke/migrate_repo/__init__.py keystone/contrib/revoke/migrate_repo/migrate.cfg keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py keystone/contrib/revoke/migrate_repo/versions/__init__.py 
keystone/contrib/s3/__init__.py keystone/contrib/s3/core.py keystone/contrib/simple_cert/__init__.py keystone/contrib/simple_cert/routers.py keystone/contrib/user_crud/__init__.py keystone/contrib/user_crud/core.py keystone/credential/__init__.py keystone/credential/controllers.py keystone/credential/core.py keystone/credential/routers.py keystone/credential/schema.py keystone/credential/backends/__init__.py keystone/credential/backends/sql.py keystone/endpoint_policy/__init__.py keystone/endpoint_policy/controllers.py keystone/endpoint_policy/core.py keystone/endpoint_policy/routers.py keystone/endpoint_policy/backends/__init__.py keystone/endpoint_policy/backends/sql.py keystone/federation/__init__.py keystone/federation/constants.py keystone/federation/controllers.py keystone/federation/core.py keystone/federation/idp.py keystone/federation/routers.py keystone/federation/schema.py keystone/federation/utils.py keystone/federation/V8_backends/__init__.py keystone/federation/V8_backends/sql.py keystone/federation/backends/__init__.py keystone/federation/backends/sql.py keystone/identity/__init__.py keystone/identity/controllers.py keystone/identity/core.py keystone/identity/generator.py keystone/identity/routers.py keystone/identity/schema.py keystone/identity/backends/__init__.py keystone/identity/backends/ldap.py keystone/identity/backends/sql.py keystone/identity/id_generators/__init__.py keystone/identity/id_generators/sha256.py keystone/identity/mapping_backends/__init__.py keystone/identity/mapping_backends/mapping.py keystone/identity/mapping_backends/sql.py keystone/identity/shadow_backends/__init__.py keystone/identity/shadow_backends/sql.py keystone/locale/keystone-log-critical.pot keystone/locale/keystone-log-error.pot keystone/locale/keystone-log-info.pot keystone/locale/keystone-log-warning.pot keystone/locale/keystone.pot keystone/locale/de/LC_MESSAGES/keystone-log-critical.po keystone/locale/de/LC_MESSAGES/keystone.po 
keystone/locale/el/LC_MESSAGES/keystone-log-critical.po keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po keystone/locale/es/LC_MESSAGES/keystone-log-critical.po keystone/locale/es/LC_MESSAGES/keystone.po keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po keystone/locale/fr/LC_MESSAGES/keystone.po keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po keystone/locale/it/LC_MESSAGES/keystone-log-critical.po keystone/locale/it/LC_MESSAGES/keystone.po keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po keystone/locale/ja/LC_MESSAGES/keystone.po keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po keystone/locale/ko_KR/LC_MESSAGES/keystone-log-error.po keystone/locale/ko_KR/LC_MESSAGES/keystone-log-info.po keystone/locale/ko_KR/LC_MESSAGES/keystone-log-warning.po keystone/locale/ko_KR/LC_MESSAGES/keystone.po keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po keystone/locale/pt_BR/LC_MESSAGES/keystone.po keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po keystone/locale/ru/LC_MESSAGES/keystone.po keystone/locale/tr_TR/LC_MESSAGES/keystone-log-critical.po keystone/locale/tr_TR/LC_MESSAGES/keystone-log-error.po keystone/locale/tr_TR/LC_MESSAGES/keystone-log-warning.po keystone/locale/tr_TR/LC_MESSAGES/keystone.po keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po keystone/locale/zh_CN/LC_MESSAGES/keystone.po keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po keystone/locale/zh_TW/LC_MESSAGES/keystone.po keystone/middleware/__init__.py keystone/middleware/auth.py keystone/middleware/core.py keystone/models/__init__.py keystone/models/revoke_model.py keystone/models/token_model.py keystone/oauth1/__init__.py keystone/oauth1/controllers.py keystone/oauth1/core.py keystone/oauth1/routers.py keystone/oauth1/schema.py keystone/oauth1/validator.py keystone/oauth1/backends/__init__.py 
keystone/oauth1/backends/sql.py keystone/policy/__init__.py keystone/policy/controllers.py keystone/policy/core.py keystone/policy/routers.py keystone/policy/schema.py keystone/policy/backends/__init__.py keystone/policy/backends/rules.py keystone/policy/backends/sql.py keystone/resource/__init__.py keystone/resource/controllers.py keystone/resource/core.py keystone/resource/routers.py keystone/resource/schema.py keystone/resource/V8_backends/__init__.py keystone/resource/V8_backends/sql.py keystone/resource/backends/__init__.py keystone/resource/backends/sql.py keystone/resource/config_backends/__init__.py keystone/resource/config_backends/sql.py keystone/revoke/__init__.py keystone/revoke/controllers.py keystone/revoke/core.py keystone/revoke/model.py keystone/revoke/routers.py keystone/revoke/backends/__init__.py keystone/revoke/backends/sql.py keystone/server/__init__.py keystone/server/backends.py keystone/server/common.py keystone/server/eventlet.py keystone/server/wsgi.py keystone/tests/__init__.py keystone/tests/common/__init__.py keystone/tests/common/auth.py keystone/tests/functional/__init__.py keystone/tests/functional/core.py keystone/tests/functional/shared/__init__.py keystone/tests/functional/shared/test_running.py keystone/tests/hacking/__init__.py keystone/tests/hacking/checks.py keystone/tests/unit/__init__.py keystone/tests/unit/core.py keystone/tests/unit/default_catalog.templates keystone/tests/unit/default_fixtures.py keystone/tests/unit/fakeldap.py keystone/tests/unit/federation_fixtures.py keystone/tests/unit/filtering.py keystone/tests/unit/identity_mapping.py keystone/tests/unit/mapping_fixtures.py keystone/tests/unit/rest.py keystone/tests/unit/test_associate_project_endpoint_extension.py keystone/tests/unit/test_auth.py keystone/tests/unit/test_auth_plugin.py keystone/tests/unit/test_backend_endpoint_policy.py keystone/tests/unit/test_backend_endpoint_policy_sql.py keystone/tests/unit/test_backend_federation_sql.py 
keystone/tests/unit/test_backend_id_mapping_sql.py keystone/tests/unit/test_backend_kvs.py keystone/tests/unit/test_backend_ldap.py keystone/tests/unit/test_backend_ldap_pool.py keystone/tests/unit/test_backend_rules.py keystone/tests/unit/test_backend_sql.py keystone/tests/unit/test_backend_templated.py keystone/tests/unit/test_catalog.py keystone/tests/unit/test_cert_setup.py keystone/tests/unit/test_cli.py keystone/tests/unit/test_config.py keystone/tests/unit/test_contrib_s3_core.py keystone/tests/unit/test_contrib_simple_cert.py keystone/tests/unit/test_credential.py keystone/tests/unit/test_driver_hints.py keystone/tests/unit/test_entry_points.py keystone/tests/unit/test_exception.py keystone/tests/unit/test_hacking_checks.py keystone/tests/unit/test_ipv6.py keystone/tests/unit/test_kvs.py keystone/tests/unit/test_ldap_livetest.py keystone/tests/unit/test_ldap_pool_livetest.py keystone/tests/unit/test_ldap_tls_livetest.py keystone/tests/unit/test_middleware.py keystone/tests/unit/test_no_admin_token_auth.py keystone/tests/unit/test_policy.py keystone/tests/unit/test_revoke.py keystone/tests/unit/test_sql_livetest.py keystone/tests/unit/test_sql_migrate_extensions.py keystone/tests/unit/test_sql_upgrade.py keystone/tests/unit/test_ssl.py keystone/tests/unit/test_token_bind.py keystone/tests/unit/test_token_provider.py keystone/tests/unit/test_url_middleware.py keystone/tests/unit/test_v2.py keystone/tests/unit/test_v2_controller.py keystone/tests/unit/test_v3.py keystone/tests/unit/test_v3_assignment.py keystone/tests/unit/test_v3_auth.py keystone/tests/unit/test_v3_catalog.py keystone/tests/unit/test_v3_credential.py keystone/tests/unit/test_v3_domain_config.py keystone/tests/unit/test_v3_endpoint_policy.py keystone/tests/unit/test_v3_federation.py keystone/tests/unit/test_v3_filters.py keystone/tests/unit/test_v3_identity.py keystone/tests/unit/test_v3_oauth1.py keystone/tests/unit/test_v3_os_revoke.py keystone/tests/unit/test_v3_policy.py 
keystone/tests/unit/test_v3_protection.py keystone/tests/unit/test_v3_resource.py keystone/tests/unit/test_v3_trust.py keystone/tests/unit/test_validation.py keystone/tests/unit/test_versions.py keystone/tests/unit/test_wsgi.py keystone/tests/unit/utils.py keystone/tests/unit/assignment/__init__.py keystone/tests/unit/assignment/test_backends.py keystone/tests/unit/assignment/test_core.py keystone/tests/unit/assignment/role_backends/__init__.py keystone/tests/unit/assignment/role_backends/test_sql.py keystone/tests/unit/auth/__init__.py keystone/tests/unit/auth/test_controllers.py keystone/tests/unit/backend/__init__.py keystone/tests/unit/backend/core_ldap.py keystone/tests/unit/backend/core_sql.py keystone/tests/unit/backend/legacy_drivers/__init__.py keystone/tests/unit/backend/legacy_drivers/assignment/__init__.py keystone/tests/unit/backend/legacy_drivers/assignment/V8/__init__.py keystone/tests/unit/backend/legacy_drivers/assignment/V8/sql.py keystone/tests/unit/backend/legacy_drivers/federation/__init__.py keystone/tests/unit/backend/legacy_drivers/federation/V8/__init__.py keystone/tests/unit/backend/legacy_drivers/federation/V8/api_v3.py keystone/tests/unit/backend/legacy_drivers/resource/__init__.py keystone/tests/unit/backend/legacy_drivers/resource/V8/__init__.py keystone/tests/unit/backend/legacy_drivers/resource/V8/sql.py keystone/tests/unit/backend/legacy_drivers/role/__init__.py keystone/tests/unit/backend/legacy_drivers/role/V8/__init__.py keystone/tests/unit/backend/legacy_drivers/role/V8/sql.py keystone/tests/unit/catalog/__init__.py keystone/tests/unit/catalog/test_backends.py keystone/tests/unit/catalog/test_core.py keystone/tests/unit/common/__init__.py keystone/tests/unit/common/test_authorization.py keystone/tests/unit/common/test_injection.py keystone/tests/unit/common/test_json_home.py keystone/tests/unit/common/test_ldap.py keystone/tests/unit/common/test_manager.py keystone/tests/unit/common/test_notifications.py 
keystone/tests/unit/common/test_sql_core.py keystone/tests/unit/common/test_utils.py keystone/tests/unit/config_files/backend_db2.conf keystone/tests/unit/config_files/backend_ldap.conf keystone/tests/unit/config_files/backend_ldap_pool.conf keystone/tests/unit/config_files/backend_ldap_sql.conf keystone/tests/unit/config_files/backend_liveldap.conf keystone/tests/unit/config_files/backend_multi_ldap_sql.conf keystone/tests/unit/config_files/backend_mysql.conf keystone/tests/unit/config_files/backend_pool_liveldap.conf keystone/tests/unit/config_files/backend_postgresql.conf keystone/tests/unit/config_files/backend_sql.conf keystone/tests/unit/config_files/backend_tls_liveldap.conf keystone/tests/unit/config_files/deprecated.conf keystone/tests/unit/config_files/deprecated_override.conf keystone/tests/unit/config_files/test_auth_plugin.conf keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.conf keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.conf keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.conf keystone/tests/unit/contrib/__init__.py keystone/tests/unit/contrib/federation/__init__.py keystone/tests/unit/contrib/federation/test_utils.py keystone/tests/unit/external/README.rst keystone/tests/unit/external/__init__.py keystone/tests/unit/external/test_timeutils.py keystone/tests/unit/identity/__init__.py keystone/tests/unit/identity/test_backends.py keystone/tests/unit/identity/test_controllers.py keystone/tests/unit/identity/test_core.py keystone/tests/unit/ksfixtures/__init__.py keystone/tests/unit/ksfixtures/appserver.py 
keystone/tests/unit/ksfixtures/auth_plugins.py keystone/tests/unit/ksfixtures/cache.py keystone/tests/unit/ksfixtures/database.py keystone/tests/unit/ksfixtures/hacking.py keystone/tests/unit/ksfixtures/key_repository.py keystone/tests/unit/ksfixtures/ldapdb.py keystone/tests/unit/ksfixtures/policy.py keystone/tests/unit/ksfixtures/temporaryfile.py keystone/tests/unit/policy/__init__.py keystone/tests/unit/policy/test_backends.py keystone/tests/unit/resource/__init__.py keystone/tests/unit/resource/test_backends.py keystone/tests/unit/resource/test_controllers.py keystone/tests/unit/resource/test_core.py keystone/tests/unit/resource/backends/__init__.py keystone/tests/unit/resource/backends/test_sql.py keystone/tests/unit/resource/config_backends/__init__.py keystone/tests/unit/resource/config_backends/test_sql.py keystone/tests/unit/saml2/idp_saml2_metadata.xml keystone/tests/unit/saml2/signed_saml2_assertion.xml keystone/tests/unit/schema/__init__.py keystone/tests/unit/schema/v2.py keystone/tests/unit/tests/__init__.py keystone/tests/unit/tests/test_core.py keystone/tests/unit/tests/test_utils.py keystone/tests/unit/token/__init__.py keystone/tests/unit/token/test_backends.py keystone/tests/unit/token/test_fernet_provider.py keystone/tests/unit/token/test_pki_provider.py keystone/tests/unit/token/test_pkiz_provider.py keystone/tests/unit/token/test_provider.py keystone/tests/unit/token/test_token_data_helper.py keystone/tests/unit/token/test_token_model.py keystone/tests/unit/token/test_uuid_provider.py keystone/tests/unit/trust/__init__.py keystone/tests/unit/trust/test_backends.py keystone/token/__init__.py keystone/token/_simple_cert.py keystone/token/controllers.py keystone/token/provider.py keystone/token/routers.py keystone/token/utils.py keystone/token/persistence/__init__.py keystone/token/persistence/core.py keystone/token/persistence/backends/__init__.py keystone/token/persistence/backends/kvs.py keystone/token/persistence/backends/memcache.py 
keystone/token/persistence/backends/memcache_pool.py keystone/token/persistence/backends/sql.py keystone/token/providers/__init__.py keystone/token/providers/common.py keystone/token/providers/pki.py keystone/token/providers/pkiz.py keystone/token/providers/uuid.py keystone/token/providers/fernet/__init__.py keystone/token/providers/fernet/core.py keystone/token/providers/fernet/token_formatters.py keystone/token/providers/fernet/utils.py keystone/trust/__init__.py keystone/trust/controllers.py keystone/trust/core.py keystone/trust/routers.py keystone/trust/schema.py keystone/trust/backends/__init__.py keystone/trust/backends/sql.py keystone/v2_crud/__init__.py keystone/v2_crud/admin_crud.py keystone/v2_crud/user_crud.py keystone/version/__init__.py keystone/version/controllers.py keystone/version/routers.py keystone/version/service.py rally-jobs/README.rst rally-jobs/keystone.yaml releasenotes/notes/.placeholder releasenotes/notes/Assignment_V9_driver-c22be069f7baccb0.yaml releasenotes/notes/DomainSpecificRoles-fc5dd2ef74a1442c.yaml releasenotes/notes/Role_V9_driver-971c3aae14d9963d.yaml releasenotes/notes/V9ResourceDriver-26716f97c0cc1a80.yaml releasenotes/notes/add-bootstrap-cli-192500228cc6e574.yaml releasenotes/notes/admin_token-a5678d712783c145.yaml releasenotes/notes/admin_token-c634ec12fc714255.yaml releasenotes/notes/bp-domain-config-default-82e42d946ee7cb43.yaml releasenotes/notes/bp-url-safe-naming-ad90d6a659f5bf3c.yaml releasenotes/notes/bug-1490804-de58a9606edb31eb.yaml releasenotes/notes/bug-1519210-de76097c974f9c93.yaml releasenotes/notes/bug-1535878-change-get_project-permission-e460af1256a2c056.yaml releasenotes/notes/bug-1542417-d630b7886bb0b369.yaml releasenotes/notes/bug_1526462-df9a3f3974d9040f.yaml releasenotes/notes/catalog-caching-12f2532cfb71325a.yaml releasenotes/notes/catalog_project_id-519f5a70f9f7c4c6.yaml releasenotes/notes/deprecate-endpoint-policy-cfg-option-d018acab72a398a0.yaml 
releasenotes/notes/deprecate-memcache-token-persistence-eac88c80147ea241.yaml releasenotes/notes/deprecate-v2-apis-894284c17be881d2.yaml releasenotes/notes/deprecated-as-of-mitaka-8534e43fa40c1d09.yaml releasenotes/notes/enable-filter-idp-d0135f4615178cfc.yaml releasenotes/notes/enable-inherit-on-default-54ac435230261a6a.yaml releasenotes/notes/endpoints-from-endpoint_group-project-association-7271fba600322fb6.yaml releasenotes/notes/extensions-to-core-a0d270d216d47276.yaml releasenotes/notes/federation-group-ids-mapping-6c56120d65a5cb22.yaml releasenotes/notes/httpd-keystone-d51b7335559b09c8.yaml releasenotes/notes/impl-templated-catalog-1d8f6333726b34f8.yaml releasenotes/notes/implied-roles-026f401adc0f7fb6.yaml releasenotes/notes/insecure_reponse-2a168230709bc8e7.yaml releasenotes/notes/is-admin-24b34238c83b3a82.yaml releasenotes/notes/ldap-conn-pool-enabled-90df94652f1ded53.yaml releasenotes/notes/ldap-emulation-91c4d535eb9c3d10.yaml releasenotes/notes/list_limit-ldap-support-5d31d51466fc49a6.yaml releasenotes/notes/list_role_assignment_names-33aedc1e521230b6.yaml releasenotes/notes/migration_squash-f655329ddad7fc2a.yaml releasenotes/notes/no-default-domain-2161ada44bf7a3f7.yaml releasenotes/notes/notify-on-user-group-membership-8c0136ee0484e255.yaml releasenotes/notes/oslo.cache-a9ce47bfa8809efa.yaml releasenotes/notes/projects_as_domains-3ea8a58b4c2965e1.yaml releasenotes/notes/remove-trust-auth-support-from-v2-de316c9ba46d556d.yaml releasenotes/notes/removed-as-of-mitaka-9ff14f87d0b98e7e.yaml releasenotes/notes/request_context-e143ba9c446a5952.yaml releasenotes/notes/revert-v2-token-issued-for-non-default-domain-25ea5337f158ef13.yaml releasenotes/notes/s3-aws-v4-c6cb75ce8d2289d4.yaml releasenotes/notes/totp-40d93231714c6a20.yaml releasenotes/notes/v3-endpoints-in-v2-list-b0439816938713d6.yaml releasenotes/notes/v9FederationDriver-cbebcf5f97e1eae2.yaml releasenotes/notes/x509-auth-df0a229780b8e3ff.yaml releasenotes/source/conf.py releasenotes/source/index.rst 
releasenotes/source/liberty.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/pretty_tox.sh tools/pretty_tox_py3.sh tools/sample_data.shkeystone-9.0.0/keystone.egg-info/PKG-INFO0000664000567000056710000000463012701407245021157 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: keystone Version: 9.0.0 Summary: OpenStack Identity Home-page: http://docs.openstack.org/developer/keystone/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: ================== OpenStack Keystone ================== Keystone provides authentication, authorization and service discovery mechanisms via HTTP primarily for use by projects in the OpenStack family. It is most commonly deployed as an HTTP interface to existing identity systems, such as LDAP. Developer documentation, the source of which is in ``doc/source/``, is published at: http://docs.openstack.org/developer/keystone/ The API specification and documentation are available at: http://specs.openstack.org/openstack/keystone-specs/ The canonical client library is available at: https://git.openstack.org/cgit/openstack/python-keystoneclient Documentation for cloud administrators is available at: http://docs.openstack.org/ The source of documentation for cloud administrators is available at: https://git.openstack.org/cgit/openstack/openstack-manuals Information about our team meeting is available at: https://wiki.openstack.org/wiki/Meetings/KeystoneMeeting Bugs and feature requests are tracked on Launchpad at: https://bugs.launchpad.net/keystone Future design work is tracked at: http://specs.openstack.org/openstack/keystone-specs/#identity-program-specifications Contributors are encouraged to join IRC (``#openstack-keystone`` on freenode): https://wiki.openstack.org/wiki/IRC For information on contributing to Keystone, see ``CONTRIBUTING.rst``. 
Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 keystone-9.0.0/keystone.egg-info/top_level.txt0000664000567000056710000000001112701407245022601 0ustar jenkinsjenkins00000000000000keystone keystone-9.0.0/keystone.egg-info/pbr.json0000664000567000056710000000005612701407245021536 0ustar jenkinsjenkins00000000000000{"is_release": true, "git_version": "3e5fca0"}keystone-9.0.0/keystone.egg-info/entry_points.txt0000664000567000056710000001125112701407245023355 0ustar jenkinsjenkins00000000000000[console_scripts] keystone-all = keystone.cmd.all:main keystone-manage = keystone.cmd.manage:main [keystone.assignment] sql = keystone.assignment.backends.sql:Assignment [keystone.auth.external] DefaultDomain = keystone.auth.plugins.external:DefaultDomain Domain = keystone.auth.plugins.external:Domain default = keystone.auth.plugins.external:DefaultDomain [keystone.auth.kerberos] default = keystone.auth.plugins.external:KerberosDomain [keystone.auth.oauth1] default = keystone.auth.plugins.oauth1:OAuth [keystone.auth.openid] default = keystone.auth.plugins.mapped:Mapped [keystone.auth.password] default = keystone.auth.plugins.password:Password [keystone.auth.saml2] default = keystone.auth.plugins.mapped:Mapped [keystone.auth.token] default = keystone.auth.plugins.token:Token [keystone.auth.totp] default = keystone.auth.plugins.totp:TOTP [keystone.auth.x509] default = keystone.auth.plugins.mapped:Mapped [keystone.catalog] endpoint_filter.sql = keystone.contrib.endpoint_filter.backends.catalog_sql:EndpointFilterCatalog sql = keystone.catalog.backends.sql:Catalog templated = keystone.catalog.backends.templated:Catalog 
[keystone.credential] sql = keystone.credential.backends.sql:Credential [keystone.endpoint_filter] sql = keystone.catalog.backends.sql:Catalog [keystone.endpoint_policy] sql = keystone.endpoint_policy.backends.sql:EndpointPolicy [keystone.federation] sql = keystone.federation.backends.sql:Federation [keystone.identity] ldap = keystone.identity.backends.ldap:Identity sql = keystone.identity.backends.sql:Identity [keystone.identity.id_generator] sha256 = keystone.identity.id_generators.sha256:Generator [keystone.identity.id_mapping] sql = keystone.identity.mapping_backends.sql:Mapping [keystone.identity.shadow_users] sql = keystone.identity.shadow_backends.sql:ShadowUsers [keystone.oauth1] sql = keystone.oauth1.backends.sql:OAuth1 [keystone.policy] rules = keystone.policy.backends.rules:Policy sql = keystone.policy.backends.sql:Policy [keystone.resource] sql = keystone.resource.backends.sql:Resource [keystone.resource.domain_config] sql = keystone.resource.config_backends.sql:DomainConfig [keystone.revoke] sql = keystone.revoke.backends.sql:Revoke [keystone.role] sql = keystone.assignment.role_backends.sql:Role [keystone.token.persistence] kvs = keystone.token.persistence.backends.kvs:Token memcache = keystone.token.persistence.backends.memcache:Token memcache_pool = keystone.token.persistence.backends.memcache_pool:Token sql = keystone.token.persistence.backends.sql:Token [keystone.token.provider] fernet = keystone.token.providers.fernet:Provider pki = keystone.token.providers.pki:Provider pkiz = keystone.token.providers.pkiz:Provider uuid = keystone.token.providers.uuid:Provider [keystone.trust] sql = keystone.trust.backends.sql:Trust [oslo.config.opts] keystone = keystone.common.config:list_opts keystone.notifications = keystone.notifications:list_opts [oslo.config.opts.defaults] keystone = keystone.common.config:set_middleware_defaults [paste.app_factory] admin_service = keystone.version.service:admin_app_factory admin_version_service = 
keystone.version.service:admin_version_app_factory public_service = keystone.version.service:public_app_factory public_version_service = keystone.version.service:public_version_app_factory service_v3 = keystone.version.service:v3_app_factory [paste.filter_factory] admin_token_auth = keystone.middleware:AdminTokenAuthMiddleware.factory build_auth_context = keystone.middleware:AuthContextMiddleware.factory crud_extension = keystone.contrib.admin_crud:CrudExtension.factory debug = oslo_middleware:Debug.factory ec2_extension = keystone.contrib.ec2:Ec2Extension.factory ec2_extension_v3 = keystone.contrib.ec2:Ec2ExtensionV3.factory endpoint_filter_extension = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory federation_extension = keystone.contrib.federation.routers:FederationExtension.factory json_body = keystone.middleware:JsonBodyMiddleware.factory oauth1_extension = keystone.contrib.oauth1.routers:OAuth1Extension.factory request_id = oslo_middleware:RequestId.factory revoke_extension = keystone.contrib.revoke.routers:RevokeExtension.factory s3_extension = keystone.contrib.s3:S3Extension.factory simple_cert_extension = keystone.contrib.simple_cert:SimpleCertExtension.factory sizelimit = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory token_auth = keystone.middleware:TokenAuthMiddleware.factory url_normalize = keystone.middleware:NormalizingFilter.factory user_crud_extension = keystone.contrib.user_crud:CrudExtension.factory [wsgi_scripts] keystone-wsgi-admin = keystone.server.wsgi:initialize_admin_application keystone-wsgi-public = keystone.server.wsgi:initialize_public_application keystone-9.0.0/.testr.conf0000664000567000056710000000112512701407102016601 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command= ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./keystone/tests/unit} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list group_regex=.*(test_cert_setup) # NOTE(morganfainberg): If 
single-worker mode is wanted (e.g. for live tests) # the environment variable ``TEST_RUN_CONCURRENCY`` should be set to ``1``. If # a non-default (1 worker per available core) concurrency is desired, set # environment variable ``TEST_RUN_CONCURRENCY`` to the desired number of # workers. test_run_concurrency=echo ${TEST_RUN_CONCURRENCY:-0} keystone-9.0.0/httpd/0000775000567000056710000000000012701407246015650 5ustar jenkinsjenkins00000000000000keystone-9.0.0/httpd/README0000664000567000056710000000012712701407102016517 0ustar jenkinsjenkins00000000000000Documentation for running Keystone with Apache HTTPD is in doc/source/apache-httpd.rst keystone-9.0.0/httpd/uwsgi-keystone.conf0000664000567000056710000000042512701407102021504 0ustar jenkinsjenkins00000000000000Listen 5000 Listen 35357 ProxyPass / uwsgi://127.0.0.1:5001/ ProxyPass / uwsgi://127.0.0.1:35358/ ProxyPass /identity uwsgi://127.0.0.1:5001/ ProxyPass /identity_admin uwsgi://127.0.0.1:35358/ keystone-9.0.0/httpd/wsgi-keystone.conf0000664000567000056710000000370712701407102021325 0ustar jenkinsjenkins00000000000000Listen 5000 Listen 35357 WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP} WSGIProcessGroup keystone-public WSGIScriptAlias / /usr/local/bin/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On LimitRequestBody 114688 = 2.4> ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/apache2/keystone.log CustomLog /var/log/apache2/keystone_access.log combined = 2.4> Require all granted Order allow,deny Allow from all WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP} WSGIProcessGroup keystone-admin WSGIScriptAlias / /usr/local/bin/keystone-wsgi-admin WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On LimitRequestBody 114688 = 2.4> ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/apache2/keystone.log CustomLog /var/log/apache2/keystone_access.log combined = 2.4> Require all 
granted Order allow,deny Allow from all Alias /identity /usr/local/bin/keystone-wsgi-public SetHandler wsgi-script Options +ExecCGI WSGIProcessGroup keystone-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On Alias /identity_admin /usr/local/bin/keystone-wsgi-admin SetHandler wsgi-script Options +ExecCGI WSGIProcessGroup keystone-admin WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On keystone-9.0.0/httpd/keystone-uwsgi-public.ini0000664000567000056710000000101412701407102022605 0ustar jenkinsjenkins00000000000000[uwsgi] wsgi-file = /usr/local/bin/keystone-wsgi-public # Versions of mod_proxy_uwsgi>=2.0.6 should use a UNIX socket, see # http://uwsgi-docs.readthedocs.org/en/latest/Apache.html#mod-proxy-uwsgi uwsgi-socket = 127.0.0.1:5001 # Override the default size for headers from the 4k default. buffer-size = 65535 # This is running standalone master = true enable-threads = true # Tune this to your environment. threads = 4 # uwsgi recommends this to prevent thundering herd on accept. thunder-lock = true plugins = python keystone-9.0.0/httpd/keystone.py0000664000567000056710000000254612701407102020061 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from oslo_log import log from oslo_log import versionutils from keystone.i18n import _LW from keystone.server import wsgi as wsgi_server name = os.path.basename(__file__) LOG = log.getLogger(__name__) def deprecation_warning(): versionutils.report_deprecated_feature( LOG, _LW('httpd/keystone.py is deprecated as of Mitaka' ' in favor of keystone-wsgi-admin and keystone-wsgi-public' ' and may be removed in O.') ) # NOTE(ldbragst): 'application' is required in this context by WSGI spec. # The following is a reference to Python Paste Deploy documentation # http://pythonpaste.org/deploy/ application = wsgi_server.initialize_application( name, post_log_configured_function=deprecation_warning) keystone-9.0.0/httpd/keystone-uwsgi-admin.ini0000664000567000056710000000101412701407102022417 0ustar jenkinsjenkins00000000000000[uwsgi] wsgi-file = /usr/local/bin/keystone-wsgi-admin # Versions of mod_proxy_uwsgi>=2.0.6 should use a UNIX socket, see # http://uwsgi-docs.readthedocs.org/en/latest/Apache.html#mod-proxy-uwsgi uwsgi-socket = 127.0.0.1:35358 # Override the default size for headers from the 4k default. buffer-size = 65535 # This is running standalone master = true enable-threads = true # Tune this to your environment. threads = 4 # uwsgi recommends this to prevent thundering herd on accept. thunder-lock = true plugins = python keystone-9.0.0/AUTHORS0000664000567000056710000004250512701407245015602 0ustar jenkinsjenkins00000000000000Adam Gandelman Adam Young Adam Young Adipudi Praveena Ajaya Agrawal Akira YOSHIYAMA Alan Pevec Alan Pevec Alberto Planas Alessio Ababilov Alessio Ababilov Alex Gaynor Alex Silva Alexander Makarov Alexander Maretskiy Alexey Miroshkin Allan Feid Alvaro Lopez Garcia Amy Marrich Andreas Jaeger Andreas Jaeger Andrew Bogott Andrey Andrey Kurilin Andrey Pavlov Andy Smith Angus Lees Ankit Agrawal Anna Sortland Anne Gentle Anthony Dodd Anthony Young Arun Kant Arvind Tiwari Atsushi SAKAI Ben Nemec Bernhard M. 
Wiedemann Bhuvan Arumugam Bo-Chun Lin Bob Thyne Boris Bobrov Brad Pokorny Brad Topol Brant Knudson Brian Lamar Brian Waldon Bruno Semperlotti Carlos D. Garza Carlos Marin Chandan kumar Chang Bo Guo ChangBo Guo(gcb) Chaozhe.Chen Chen Li Chloe Jensen Chmouel Boudjnah Chris Jones Chris Yeoh Christian Berendt Christina Darretta Christoph Gysin Chuck Short Clark Boylan Clayton O'Neill Clenimar Clenimar Filemon Clint Byrum Cole Robinson Colleen Murphy Craig Jellick Dan Prince Dan Prince Dan Radez Daniel Gollub Darren Birkett Darren Shaw Davanum Srinivas Davanum Srinivas Dave Chen David Höppner <0xffea@gmail.com> David Lyle David Ripton David Stanek Dean Troyer Deepak Garg Deepti Ramakrishna Derek Higgins Derek Yarnell Devin Carlen Diego Adolfo de Araújo Dirk Mueller Divya Dmitriy Bogun Dmitry Khovyakov Dmitry Khovyakov Dolph Mathews Doug Hellmann Doug Hellmann Du Yujie Ed Leafe Edgar Magana Edmund Rhudy Eduardo Patrocinio Edward Hope-Morley Einar Forselv Elena Ezhova Emilien Macchi Eoghan Glynn Eric Brown Eric Guo Everett Toews Ewan Mellor Fabio Giannetti Felix Li Fernando Diaz Florent Flament Francois Deppierraz Gabriel Hurley Gerhard Muntingh Ghe Rivero Ghe Rivero Gordon Chung Grzegorz Grasza Guang Yee Haiwei Xu Haneef Ali Harini Harry Rybacki Harshada Mangesh Kakad Hengqing Hu Henrique Truta Henry Nash Hidekazu Nakamura Hirofumi Ichihara Hugh Saunders Hugo Nicodemos Ian Denhardt Ian Wienand Ilya Pekelny Ionuț Arțăriși Irina Ivan Mironov J. Daniel Schmidt James Carey James E. Blair James E. Blair James Page James Slagle Jamie Lennox Jamie Lennox Jamie Lennox Jan Provaznik Jaroslav Henner Jason Cannavale Jason Cannavale Javeme Javier Pena Jay Pipes Jeremy Stanley Jesse Andrews Jianing YANG Jin Nan Zhang Joe Duhamel Joe Gordon Joe Heck Joe Savak Joe Savak Johannes Erdfelt John Bresnahan John Dennis John Dewey John Dickinson John Eo John Eo John Warren Jorge L. Williams Jorge Munoz Jorge Munoz Jose Castro Leon Joseph W. 
Breu Josh Kearney Juan Antonio Osorio Juan Manuel Olle Julian Edwards Julien Danjou Julien Danjou Justin Santa Barbara Justin Shepherd KIYOHIRO ADACHI Kamil Rykowski Kanagaraj Manickam Kanami Akama Ken Pepple Ken Thomas Ken'ichi Ohmichi Kent Wang Kevin Benton Kevin Kirkpatrick Kevin L. Mitchell Khaled Hussein Kiall Mac Innes KnightHacker Konstantin Maximov Kristi Nikolla Kristy Siu Krsna Widmer Kui Shi Kun Huang Kurt Taylor Kévin Bernard-Allies Lance Bragstad Lance Bragstad Lars Butler Lei Zhang Li Ma Liang Bo Liang Chen Liem Nguyen Lin Hua Cheng Lin Hua Cheng Lin Yang LiuNanke Luis A. Garcia Malini Bhandaru Mandell Degerness Marc Koderer Marco Fargetta Marcos Lobo Marek Denis Mark Gius Mark J. Washenberger Mark McClain Mark McLoughlin Maru Newby Mat Grove Mathew Odden Matt Fischer Matt Odden Matt Riedemann Matthew Edmonds Matthew Treinish Matthew Treinish Matthieu Huin Michael Basnight Michael J Fork Michael Krotscheck Michael Still Michael Tupitsyn Mikhail Durnosvistov Min Song Mohammed Naser Monty Taylor Morgan Fainberg Nachiappan VR N Nathan Kinder Nathanael Burton Navid Pustchi Nina Goradia Ning Sun Nisha Yadav Olivier Pilotte Ondřej Nový Pandiyan Paul Belanger Paul McMillan Paul Voccio Paulo Ewerton Pavel Sedlák Peng Yong Pete Zaitcev Peter Feiner Peter Razumovsky Priti Desai Priti Desai Pádraig Brady Pádraig Brady Qiaowei Ren Rafael Durán Castañeda Raildo Mascena Rajesh Tailor Ralf Haferkamp Ramana Juvvadi Ravi Shekhar Jethani Ray Chen Rich Megginson Rick Hull Rishabh Jain Robert Collins Robert Collins Robert H. 
Hyerle Robin Norwood Rodrigo Duarte Sousa Roman Bodnarchuk Roman Bogorodskiy Roman Verchikov Ron De Rose Ronald Bradford Ronald De Rose Rongze Zhu RongzeZhu Roxana Gherle Rudolf Vriend Russell Bryant Ryan Bak Sahdev Zala Sai Krishna Salvatore Orlando Sam Morrison Samuel de Medeiros Queiroz Samuel de Medeiros Queiroz Sandy Walsh Sarvesh Ranjan Sascha Peilicke Sascha Peilicke Saulo Aislan Sean Dague Sean Dague Sean Perry Sean Perry Sean Winn Sergey Lukjanov Sergey Nikitin Sergey Nuzdhin Sergey Skripnick Sergey Vilgelm Shane Wang Shevek Shuquan Huang Simo Sorce Sirish Bitra Slawomir Gonet Sony K. Philip Stanisław Pitucha Stef T Steve Baker Steve Martinelli Steven Hardy Stuart McLaren Sushil Kumar Syed Armani Sylvain Afchain THOMAS J. COCOZZELLO Tahmina Ahmed Takashi NATSUME Telles Nobrega Theodore Ilie Thierry Carrez Thomas Bechtold Thomas Hsiao Tim Simpson Timothy Symanczyk Tin Lam Todd Willey Tom Cameron Tom Cocozzello Tom Fifield Tom Fifield Tony NIU Tony Wang Tushar Patil Unmesh Gurjar Unmesh Gurjar Varun Mittal Venkatesh Sampath Victor Morales Victor Sergeyev Victor Silva Victor Stinner Vincent Hou Vincent Untz Vishvananda Ishaya Vivek Dhayaal Vladimir Eremin Will Kelly William Kelly Wu Wenxiang XiaBing Yao Xuhan Peng YaHong Du Yaguang Tang Yaguang Tang YangLei Yejia Xu Yogeshwar Srikrishnan Yong Sheng Gong Yong Sheng Gong You Ji You Yamagata Yuiko Takada Yun Mao Yuriy Taraday Zhang Chun Zhang Jinnan Zhenguo Niu ZhiQiang Fan ZhiQiang Fan Zhongyue Luo Ziad Sawalha abhishekkekane ajayaa alatynskaya algerwang annegentle anusha-rayani-7 april ayoung boden bruceSz bsirish chioleong damon-devops daniel-a-nguyen darren-wang dcramer galstrom21 gengjh gholt guang-yee guang-yee henriquetruta hgangwx houming-wang huangtianhua jabdul jakedahn janonymous jiataotj jiaxi jinxingfang jun xie leekelby lin-hua-cheng liu-sheng liuchenhong liuqing long-wang ls1175 lvdongbing malei mari-linhares mathrock monsterxx03 mouad benchchaoui naveenkunareddy phil-hopkins-a pmoosh r-sekine 
root rpedde ruichen saikrishna saikrishna1511@gmail.com sam leong saradpatel saranjan sathish-nagappan sirish bitra tanlin termie venkatamahesh vishvananda wanghong wanglong werner mendizabal xiexs xingzhou yangyapeng zhang-jinnan zhang.lei zhangguoqing zhiyuan_cai ziadsawalha zouyee Édouard Thuleau “Fernando keystone-9.0.0/requirements.txt0000664000567000056710000000247712701407102020012 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr>=1.6 # Apache-2.0 WebOb>=1.2.3 # MIT eventlet!=0.18.3,>=0.18.2 # MIT greenlet>=0.3.2 # MIT PasteDeploy>=1.5.0 # MIT Paste # MIT Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7' # MIT Routes!=2.0,>=1.12.3;python_version!='2.7' # MIT cryptography>=1.0 # BSD/Apache-2.0 six>=1.9.0 # MIT SQLAlchemy<1.1.0,>=1.0.10 # MIT sqlalchemy-migrate>=0.9.6 # Apache-2.0 stevedore>=1.5.0 # Apache-2.0 passlib>=1.6 # BSD python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 # Apache-2.0 keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 oslo.cache>=1.5.0 # Apache-2.0 oslo.concurrency>=3.5.0 # Apache-2.0 oslo.config>=3.7.0 # Apache-2.0 oslo.context>=0.2.0 # Apache-2.0 oslo.messaging>=4.0.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.service>=1.0.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 oauthlib>=0.6 # BSD pysaml2<4.0.3,>=2.4.0 # Apache-2.0 dogpile.cache>=0.5.7 # BSD jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT pycadf!=2.0.0,>=1.1.0 # Apache-2.0 msgpack-python>=0.4.0 # Apache-2.0 keystone-9.0.0/HACKING.rst0000664000567000056710000000261612701407102016317 0ustar jenkinsjenkins00000000000000Keystone Style Commandments =========================== - Step 1: Read the OpenStack Style Commandments 
http://docs.openstack.org/developer/hacking/ - Step 2: Read on Keystone Specific Commandments ------------------------------ - Avoid using "double quotes" where you can reasonably use 'single quotes' TODO vs FIXME ------------- - TODO(name): implies that something should be done (cleanup, refactoring, etc), but is expected to be functional. - FIXME(name): implies that the method/function/etc shouldn't be used until that code is resolved and bug fixed. Logging ------- Use the common logging module, and ensure you ``getLogger``:: from oslo_log import log LOG = log.getLogger(__name__) LOG.debug('Foobar') AssertEqual argument order -------------------------- assertEqual method's arguments should be in ('expected', 'actual') order. Properly Calling Callables -------------------------- Methods, functions and classes can specify optional parameters (with default values) using Python's keyword arg syntax. When providing a value to such a callable we prefer that the call also uses keyword arg syntax. For example:: def f(required, optional=None): pass # GOOD f(0, optional=True) # BAD f(0, True) This gives us the flexibility to re-order arguments and more importantly to add new required arguments. It's also more explicit and easier to read. keystone-9.0.0/CONTRIBUTING.rst0000664000567000056710000000121412701407102017153 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html If you already have a good understanding of how the system works and your OpenStack accounts are set up, you can skip to the development workflow section of this documentation to learn how changes to OpenStack should be submitted for review via the Gerrit tool: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/keystone keystone-9.0.0/releasenotes/0000775000567000056710000000000012701407246017216 5ustar jenkinsjenkins00000000000000keystone-9.0.0/releasenotes/notes/0000775000567000056710000000000012701407246020346 5ustar jenkinsjenkins00000000000000keystone-9.0.0/releasenotes/notes/list_limit-ldap-support-5d31d51466fc49a6.yaml0000664000567000056710000000025212701407102030162 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1501698 `_] Support parameter `list_limit` when LDAP is used as identity backend. ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000keystone-9.0.0/releasenotes/notes/revert-v2-token-issued-for-non-default-domain-25ea5337f158ef13.yamlkeystone-9.0.0/releasenotes/notes/revert-v2-token-issued-for-non-default-domain-25ea5337f158ef13.yam0000664000567000056710000000104012701407102033706 0ustar jenkinsjenkins00000000000000fixes: - > [`bug 1527759 `_] Reverted the change that eliminates the ability to get a V2 token with a user or project that is not in the default domain. This change broke real-world deployments that utilized the ability to authenticate via V2 API with a user not in the default domain or with a project not in the default domain. The deployer is being convinced to update code to properly handle V3 auth but the fix broke expected and tested behavior. keystone-9.0.0/releasenotes/notes/no-default-domain-2161ada44bf7a3f7.yaml0000664000567000056710000000047012701407102027012 0ustar jenkinsjenkins00000000000000--- other: - > ``keystone-manage db_sync`` will no longer create the Default domain. This domain is used as the domain for any users created using the legacy v2.0 API. A default domain is created by ``keystone-manage bootstrap`` and when a user or project is created using the legacy v2.0 API. 
keystone-9.0.0/releasenotes/notes/DomainSpecificRoles-fc5dd2ef74a1442c.yaml0000664000567000056710000000120212701407102027450 0ustar jenkinsjenkins00000000000000--- features: - > [`blueprint domain-specific-roles `_] Roles can now be optionally defined as domain specific. Domain specific roles are not referenced in policy files, rather they can be used to allow a domain to build their own private inference rules with implied roles. A domain specific role can be assigned to a domain or project within its domain, and any subset of global roles it implies will appear in a token scoped to the respective domain or project. The domain specific role itself, however, will not appear in the token. keystone-9.0.0/releasenotes/notes/list_role_assignment_names-33aedc1e521230b6.yaml0000664000567000056710000000043312701407102031023 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1479569 `_] Names have been added to list role assignments (GET /role_assignments?include_names=True), rather than returning just the internal IDs of the objects the names are also returned. keystone-9.0.0/releasenotes/notes/federation-group-ids-mapping-6c56120d65a5cb22.yaml0000664000567000056710000000045412701407102031026 0ustar jenkinsjenkins00000000000000--- features: - > [`blueprint federation-group-ids-mapped-without-domain-reference `_] Enhanced the federation mapping engine to allow for group IDs to be referenced without a domain ID. keystone-9.0.0/releasenotes/notes/notify-on-user-group-membership-8c0136ee0484e255.yaml0000664000567000056710000000035712701407102031464 0ustar jenkinsjenkins00000000000000--- fixes: - Support has now been added to send notification events on user/group membership. When a user is added or removed from a group a notification will be sent including the identifiers of both the user and the group. 
keystone-9.0.0/releasenotes/notes/deprecate-memcache-token-persistence-eac88c80147ea241.yaml0000664000567000056710000000045012701407102032566 0ustar jenkinsjenkins00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] The token memcache and memcache_pool persistence backends have been deprecated in favor of using Fernet tokens (which require no persistence). keystone-9.0.0/releasenotes/notes/deprecate-endpoint-policy-cfg-option-d018acab72a398a0.yaml0000664000567000056710000000044312701407102032614 0ustar jenkinsjenkins00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] Deprecate the ``enabled`` option from ``[endpoint_policy]``, it will be removed in the 'O' release, and the extension will always be enabled. keystone-9.0.0/releasenotes/notes/enable-filter-idp-d0135f4615178cfc.yaml0000664000567000056710000000053012701407102026636 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1525317 `_] Enable filtering of identity providers based on `id`, and `enabled` attributes. - > [`bug 1555830 `_] Enable filtering of service providers based on `id`, and `enabled` attributes.keystone-9.0.0/releasenotes/notes/httpd-keystone-d51b7335559b09c8.yaml0000664000567000056710000000046412701407102026273 0ustar jenkinsjenkins00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] The file ``httpd/keystone.py`` has been deprecated in favor of ``keystone-wsgi-admin`` and ``keystone-wsgi-public`` and may be removed in the 'O' release. keystone-9.0.0/releasenotes/notes/admin_token-c634ec12fc714255.yaml0000664000567000056710000000116212701407102025642 0ustar jenkinsjenkins00000000000000--- security: - The admin_token method of authentication was never intended to be used for any purpose other than bootstrapping an install. However many deployments had to leave the admin_token method enabled due to restrictions on editing the paste file used to configure the web pipelines. 
To minimize the risk from this mechanism, the `admin_token` configuration value now defaults to a python `None` value. In addition, if the value is set to `None`, either explicitly or implicitly, the `admin_token` will not be enabled, and an attempt to use it will lead to a failed authentication. keystone-9.0.0/releasenotes/notes/bp-domain-config-default-82e42d946ee7cb43.yaml0000664000567000056710000000045412701407102030205 0ustar jenkinsjenkins00000000000000--- features: - > [`blueprint domain-config-default `_] The Identity API now supports retrieving the default values for the configuration options that can be overriden via the domain specific configuration API. keystone-9.0.0/releasenotes/notes/extensions-to-core-a0d270d216d47276.yaml0000664000567000056710000000254312701407102027040 0ustar jenkinsjenkins00000000000000--- upgrade: - > The `keystone-paste.ini` file must be updated to remove extension filters, and their use in ``[pipeline:api_v3]``. Remove the following filters: ``[filter:oauth1_extension]``, ``[filter:federation_extension]``, ``[filter:endpoint_filter_extension]``, and ``[filter:revoke_extension]``. See the sample `keystone-paste.ini `_ file for guidance. - > The `keystone-paste.ini` file must be updated to remove extension filters, and their use in ``[pipeline:public_api]`` and ``[pipeline:admin_api]`` pipelines. Remove the following filters: ``[filter:user_crud_extension]``, ``[filter:crud_extension]``. See the sample `keystone-paste.ini `_ file for guidance. other: - > [`blueprint move-extensions `_] If any extension migrations are run, for example: ``keystone-manage db_sync --extension endpoint_policy`` an error will be returned. This is working as designed. To run these migrations simply run: ``keystone-manage db_sync``. The complete list of affected extensions are: ``oauth1``, ``federation``, ``endpoint_filter``, ``endpoint_policy``, and ``revoke``. 
keystone-9.0.0/releasenotes/notes/implied-roles-026f401adc0f7fb6.yaml0000664000567000056710000000120112701407102026243 0ustar jenkinsjenkins00000000000000--- features: - > [`blueprint implied-roles `_] Keystone now supports creating implied roles. Role inference rules can now be added to indicate when the assignment of one role implies the assignment of another. The rules are of the form `prior_role` implies `implied_role`. At token generation time, user/group assignments of roles that have implied roles will be expanded to also include such roles in the token. The expansion of implied roles is controlled by the `prohibited_implied_role` option in the `[assignment]` section of `keystone.conf`. keystone-9.0.0/releasenotes/notes/v9FederationDriver-cbebcf5f97e1eae2.yaml0000664000567000056710000000035012701407102027532 0ustar jenkinsjenkins00000000000000--- deprecations: - The V8 Federation driver interface is deprecated in favor of the V9 Federation driver interface. Support for the V8 Federation driver interface is planned to be removed in the 'O' release of OpenStack. keystone-9.0.0/releasenotes/notes/.placeholder0000664000567000056710000000000012701407102022606 0ustar jenkinsjenkins00000000000000keystone-9.0.0/releasenotes/notes/insecure_reponse-2a168230709bc8e7.yaml0000664000567000056710000000047212701407102026651 0ustar jenkinsjenkins00000000000000--- upgrade: - A new config option, `insecure_debug`, is added to control whether debug information is returned to clients. This used to be controlled by the `debug` option. If you'd like to return extra information to clients set the value to ``true``. This extra information may help an attacker. 
././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000keystone-9.0.0/releasenotes/notes/endpoints-from-endpoint_group-project-association-7271fba600322fb6.yamlkeystone-9.0.0/releasenotes/notes/endpoints-from-endpoint_group-project-association-7271fba600322fb60000664000567000056710000000045012701407102034355 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1516469 `_] Endpoints filtered by endpoint_group project association will be included in the service catalog when a project scoped token is issued and ``endpoint_filter.sql`` is used for the catalog driver. keystone-9.0.0/releasenotes/notes/catalog_project_id-519f5a70f9f7c4c6.yaml0000664000567000056710000000053012701407102027266 0ustar jenkinsjenkins00000000000000--- deprecations: - Use of ``$(tenant_id)s`` in the catalog endpoints is deprecated in favor of ``$(project_id)s``. features: - Keystone supports ``$(project_id)s`` in the catalog. It works the same as ``$(tenant_id)s``. Use of ``$(tenant_id)s`` is deprecated and catalog endpoints should be updated to use ``$(project_id)s``. keystone-9.0.0/releasenotes/notes/bug-1535878-change-get_project-permission-e460af1256a2c056.yaml0000664000567000056710000000053012701407102032674 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1535878 `_] Originally, to perform GET /projects/{project_id}, the provided policy files required a user to have at least project admin level of permission. They have been updated to allow it to be performed by any user who has a role on the project. keystone-9.0.0/releasenotes/notes/impl-templated-catalog-1d8f6333726b34f8.yaml0000664000567000056710000000063012701407102027633 0ustar jenkinsjenkins00000000000000--- other: - > [`bug 1367113 `_] The "get entity" and "list entities" functionality for the KVS catalog backend has been reimplemented to use the data from the catalog template. Previously this would only act on temporary data that was created at runtime. 
The create, update and delete entity functionality now raises an exception. keystone-9.0.0/releasenotes/notes/bug_1526462-df9a3f3974d9040f.yaml0000664000567000056710000000027512701407102025143 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1526462 `_] Support for posixGroups with OpenDirectory and UNIX when using the LDAP identity driver. keystone-9.0.0/releasenotes/notes/Assignment_V9_driver-c22be069f7baccb0.yaml0000664000567000056710000000117512701407102027710 0ustar jenkinsjenkins00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] The V8 Assignment driver interface is deprecated. Support for the V8 Assignment driver interface is planned to be removed in the 'O' release of OpenStack. other: - The list_project_ids_for_user(), list_domain_ids_for_user(), list_user_ids_for_project(), list_project_ids_for_groups(), list_domain_ids_for_groups(), list_role_ids_for_groups_on_project() and list_role_ids_for_groups_on_domain() methods have been removed from the V9 version of the Assignment driver. keystone-9.0.0/releasenotes/notes/deprecated-as-of-mitaka-8534e43fa40c1d09.yaml0000664000567000056710000000315312701407102027723 0ustar jenkinsjenkins00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the PKI and PKIz token formats have been deprecated. They will be removed in the 'O' release. Due to this change, the `hash_algorithm` option in the `[token]` section of the configuration file has also been deprecated. Also due to this change, the ``keystone-manage pki_setup`` command has been deprecated as well. - > [`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, write support for the LDAP driver of the Identity backend has been deprecated. This includes the following operations: create user, create group, delete user, delete group, update user, update group, add user to group, and remove user from group. These operations will be removed in the 'O' release. 
- > [`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the auth plugin `keystone.auth.plugins.saml2.Saml2` has been deprecated. It is recommended to use `keystone.auth.plugins.mapped.Mapped` instead. The ``saml2`` plugin will be removed in the 'O' release. - > [`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the simple_cert_extension is deprecated since it is only used in support of the PKI and PKIz token formats. It will be removed in the 'O' release. keystone-9.0.0/releasenotes/notes/deprecate-v2-apis-894284c17be881d2.yaml0000664000567000056710000000062412701407102026527 0ustar jenkinsjenkins00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] Deprecated all v2.0 APIs. The keystone team recommends using v3 APIs instead. Most v2.0 APIs will be removed in the 'Q' release. However, the authentication APIs and EC2 APIs are indefinitely deprecated and will not be removed in the 'Q' release. keystone-9.0.0/releasenotes/notes/x509-auth-df0a229780b8e3ff.yaml0000664000567000056710000000036312701407102025171 0ustar jenkinsjenkins00000000000000--- features: - > [`blueprint x509-ssl-client-cert-authn `_] Keystone now supports tokenless client SSL x.509 certificate authentication and authorization. keystone-9.0.0/releasenotes/notes/remove-trust-auth-support-from-v2-de316c9ba46d556d.yaml0000664000567000056710000000022312701407102032132 0ustar jenkinsjenkins00000000000000--- other: - The ability to validate a trust-scoped token against the v2.0 API has been removed, in favor of using the version 3 of the API. keystone-9.0.0/releasenotes/notes/removed-as-of-mitaka-9ff14f87d0b98e7e.yaml0000664000567000056710000000464312701407102027464 0ustar jenkinsjenkins00000000000000--- other: - > [`blueprint removed-as-of-mitaka `_] Removed ``extras`` from token responses. These fields should not be necessary and a well-defined API makes this field redundant. This was deprecated in the Kilo release. 
- > [`blueprint removed-as-of-mitaka `_] Removed ``RequestBodySizeLimiter`` from keystone middleware. The keystone team suggests using ``oslo_middleware.sizelimit.RequestBodySizeLimiter`` instead. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] Notifications with event_type ``identity.created.role_assignment`` and ``identity.deleted.role_assignment`` have been removed. The keystone team suggests listening for ``identity.role_assignment.created`` and ``identity.role_assignment.deleted`` instead. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] Removed ``check_role_for_trust`` from the trust controller, ensure policy files do not refer to this target. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] Removed Catalog KVS backend (``keystone.catalog.backends.sql.Catalog``). This was deprecated in the Icehouse release. - > [`blueprint removed-as-of-mitaka `_] The LDAP backend for Assignment has been removed. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] The LDAP backend for Resource has been removed. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] The LDAP backend for Role has been removed. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] Removed Revoke KVS backend (``keystone.revoke.backends.kvs.Revoke``). This was deprecated in the Juno release. keystone-9.0.0/releasenotes/notes/V9ResourceDriver-26716f97c0cc1a80.yaml0000664000567000056710000000026512701407102026542 0ustar jenkinsjenkins00000000000000--- deprecations: - The V8 Resource driver interface is deprecated. Support for the V8 Resource driver interface is planned to be removed in the 'O' release of OpenStack. 
keystone-9.0.0/releasenotes/notes/bug-1490804-de58a9606edb31eb.yaml0000664000567000056710000000105712701407102025207 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1490804 `_] Audit IDs are included in the token revocation list. security: - > [`bug 1490804 `_] [`CVE-2015-7546 `_] A bug is fixed where an attacker could avoid token revocation when the PKI or PKIZ token provider is used. The complete remediation for this vulnerability requires the corresponding fix in the keystonemiddleware project. keystone-9.0.0/releasenotes/notes/v3-endpoints-in-v2-list-b0439816938713d6.yaml0000664000567000056710000000033012701407102027377 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1480270 `_] Endpoints created when using v3 of the keystone REST API will now be included when listing endpoints via the v2.0 API. keystone-9.0.0/releasenotes/notes/bug-1542417-d630b7886bb0b369.yaml0000664000567000056710000000156612701407102024774 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1542417 `_] Added support for a `user_description_attribute` mapping to the LDAP driver configuration. upgrade: - > The LDAP driver now also maps the user description attribute after user retrieval from LDAP. If this is undesired behavior for your setup, please add `description` to the `user_attribute_ignore` LDAP driver config setting. The default mapping of the description attribute is set to `description`. Please adjust the LDAP driver config setting `user_description_attribute` if your LDAP uses a different attribute name (for instance to `displayName` in case of an AD backed LDAP). If your `user_additional_attribute_mapping` setting contains `description:description` you can remove this mapping, since this is now the default behavior. 
keystone-9.0.0/releasenotes/notes/add-bootstrap-cli-192500228cc6e574.yaml0000664000567000056710000000141012701407102026520 0ustar jenkinsjenkins00000000000000--- features: - > [`blueprint bootstrap `_] keystone-manage now supports the bootstrap command on the CLI so that a keystone install can be initialized without the need of the admin_token filter in the paste-ini. security: - The use of admin_token filter is insecure compared to the use of a proper username/password. Historically the admin_token filter has been left enabled in Keystone after initialization due to the way CMS systems work. Moving to an out-of-band initialization using ``keystone-manage bootstrap`` will eliminate the security concerns around a static shared string that conveys admin access to keystone and therefore to the entire installation. keystone-9.0.0/releasenotes/notes/projects_as_domains-3ea8a58b4c2965e1.yaml0000664000567000056710000000051012701407102027464 0ustar jenkinsjenkins00000000000000--- features: - Domains are now represented as top level projects with the attribute `is_domain` set to true. Such projects will appear as parents for any previous top level projects. Projects acting as domains can be created, read, updated, and deleted via either the project API or the domain API (V3 only). keystone-9.0.0/releasenotes/notes/catalog-caching-12f2532cfb71325a.yaml0000664000567000056710000000046412701407102026351 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1489061 `_] Caching has been added to catalog retrieval on a per user ID and project ID basis. This affects both the v2 and v3 APIs. As a result this should provide a performance benefit to fernet-based deployments. keystone-9.0.0/releasenotes/notes/enable-inherit-on-default-54ac435230261a6a.yaml0000664000567000056710000000067512701407102030207 0ustar jenkinsjenkins00000000000000--- upgrade: - > The default setting for the `os_inherit` configuration option is changed to True. 
If it is required to continue with this portion of the API disabled, then override the default setting by explicitly specifying the os_inherit option as False. deprecations: - The `os_inherit` configuration option is disabled. In the future, this option will be removed and this portion of the API will be always enabled. keystone-9.0.0/releasenotes/notes/ldap-conn-pool-enabled-90df94652f1ded53.yaml0000664000567000056710000000051112701407102027656 0ustar jenkinsjenkins00000000000000--- upgrade: - > The configuration options for LDAP connection pooling, `[ldap] use_pool` and `[ldap] use_auth_pool`, are now both enabled by default. Only deployments using LDAP drivers are affected. Additional configuration options are available in the `[ldap]` section to tune connection pool size, etc. keystone-9.0.0/releasenotes/notes/bug-1519210-de76097c974f9c93.yaml0000664000567000056710000000044412701407102025010 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1519210 `_] A user may now opt-out of notifications by specifying a list of event types using the `notification_opt_out` option in `keystone.conf`. These events are never sent to a messaging service. keystone-9.0.0/releasenotes/notes/migration_squash-f655329ddad7fc2a.yaml0000664000567000056710000000023112701407102027153 0ustar jenkinsjenkins00000000000000--- upgrade: - > [`bug 1541092 `_] Only database upgrades from Kilo and newer are supported. keystone-9.0.0/releasenotes/notes/totp-40d93231714c6a20.yaml0000664000567000056710000000073112701407102024167 0ustar jenkinsjenkins00000000000000--- features: - > [`blueprint totp-auth `_] Keystone now supports authenticating via Time-based One-time Password (TOTP). To enable this feature, add the ``totp`` auth plugin to the `methods` option in the `[auth]` section of `keystone.conf`. More information about using TOTP can be found in `keystone's developer documentation `_. 
keystone-9.0.0/releasenotes/notes/ldap-emulation-91c4d535eb9c3d10.yaml0000664000567000056710000000055612701407102026354 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1515302 `_] Two new configuration options have been added to the `[ldap]` section. `user_enabled_emulation_use_group_config` and `project_enabled_emulation_use_group_config`, which allow deployers to choose if they want to override the default group LDAP schema option. keystone-9.0.0/releasenotes/notes/admin_token-a5678d712783c145.yaml0000664000567000056710000000124412701407102025517 0ustar jenkinsjenkins00000000000000--- upgrade: - > [`bug 1473553 `_] The `keystone-paste.ini` must be updated to put the ``admin_token_auth`` middleware before ``build_auth_context``. See the sample `keystone-paste.ini` for the correct `pipeline` value. Having ``admin_token_auth`` after ``build_auth_context`` is deprecated and will not be supported in a future release. deprecations: - > [`blueprint deprecated-as-of-mitaka `_] The ``admin_token_auth`` filter must now be placed before the ``build_auth_context`` filter in `keystone-paste.ini`. keystone-9.0.0/releasenotes/notes/Role_V9_driver-971c3aae14d9963d.yaml0000664000567000056710000000044412701407102026300 0ustar jenkinsjenkins00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] The V8 Role driver interface is deprecated. Support for the V8 Role driver interface is planned to be removed in the 'O' release of OpenStack. keystone-9.0.0/releasenotes/notes/bp-url-safe-naming-ad90d6a659f5bf3c.yaml0000664000567000056710000000043612701407102027173 0ustar jenkinsjenkins00000000000000--- features: - > [`blueprint url-safe-naming `_] The names of projects and domains can optionally be ensured to be url safe, to support the future ability to specify projects using hierarchical naming. 
keystone-9.0.0/releasenotes/notes/s3-aws-v4-c6cb75ce8d2289d4.yaml0000664000567000056710000000027412701407102025200 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1473042 `_] Keystone's S3 compatibility support can now authenticate using AWS Signature Version 4. keystone-9.0.0/releasenotes/notes/is-admin-24b34238c83b3a82.yaml0000664000567000056710000000140712701407102024773 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 96869 `_] A pair of configuration options have been added to the ``[resource]`` section to specify a special ``admin`` project: ``admin_project_domain_name`` and ``admin_project_name``. If these are defined, any scoped token issued for that project will have an additional identifier ``is_admin_project`` added to the token. This identifier can then be checked by the policy rules in the policy files of the services when evaluating access control policy for an API. Keystone does not yet support the ability for a project acting as a domain to be the admin project. That will be added once the rest of the code for projects acting as domains is merged. keystone-9.0.0/releasenotes/notes/request_context-e143ba9c446a5952.yaml0000664000567000056710000000045712701407102026620 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1500222 `_] Added information such as: user ID, project ID, and domain ID to log entries. As a side effect of this change, both the user's domain ID and project's domain ID are now included in the auth context. keystone-9.0.0/releasenotes/notes/oslo.cache-a9ce47bfa8809efa.yaml0000664000567000056710000000136512701407102025714 0ustar jenkinsjenkins00000000000000--- upgrade: - > Keystone now uses oslo.cache. Update the `[cache]` section of `keystone.conf` to point to oslo.cache backends: ``oslo_cache.memcache_pool`` or ``oslo_cache.mongo``. Refer to the sample configuration file for examples. See `oslo.cache `_ for additional documentation. 
deprecations: - > [`blueprint deprecated-as-of-mitaka `_] ``keystone.common.cache.backends.memcache_pool``, ``keystone.common.cache.backends.mongo``, and ``keystone.common.cache.backends.noop`` are deprecated in favor of oslo.cache backends. The keystone backends will be removed in the 'O' release. keystone-9.0.0/releasenotes/source/0000775000567000056710000000000012701407246020516 5ustar jenkinsjenkins00000000000000keystone-9.0.0/releasenotes/source/index.rst0000664000567000056710000000020212701407105022343 0ustar jenkinsjenkins00000000000000======================== Keystone Release Notes ======================== .. toctree:: :maxdepth: 1 liberty unreleased keystone-9.0.0/releasenotes/source/_templates/0000775000567000056710000000000012701407246022653 5ustar jenkinsjenkins00000000000000keystone-9.0.0/releasenotes/source/_templates/.placeholder0000664000567000056710000000000012701407102025113 0ustar jenkinsjenkins00000000000000keystone-9.0.0/releasenotes/source/unreleased.rst0000664000567000056710000000016012701407102023363 0ustar jenkinsjenkins00000000000000============================== Current Series Release Notes ============================== .. release-notes:: keystone-9.0.0/releasenotes/source/liberty.rst0000664000567000056710000000022212701407102022705 0ustar jenkinsjenkins00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty keystone-9.0.0/releasenotes/source/conf.py0000664000567000056710000002170212701407102022006 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Keystone Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = u'Keystone Release Notes' copyright = u'2015, Keystone Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. import pbr.version keystone_version = pbr.version.VersionInfo('keystone') # The full version, including alpha/beta/rc tags. release = keystone_version.version_string_with_vcs() # The short X.Y version. version = keystone_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. 
# html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'KeystoneReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'KeystoneReleaseNotes.tex', u'Keystone Release Notes Documentation', u'Keystone Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. 
List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'keystonereleasenotes', u'Keystone Release Notes Documentation', [u'Keystone Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'KeystoneReleaseNotes', u'Keystone Release Notes Documentation', u'Keystone Developers', 'KeystoneReleaseNotes', 'Identity, Authentication and Access Management for OpenStack.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False keystone-9.0.0/releasenotes/source/_static/0000775000567000056710000000000012701407246022144 5ustar jenkinsjenkins00000000000000keystone-9.0.0/releasenotes/source/_static/.placeholder0000664000567000056710000000000012701407102024404 0ustar jenkinsjenkins00000000000000keystone-9.0.0/README.rst0000664000567000056710000000261412701407102016206 0ustar jenkinsjenkins00000000000000================== OpenStack Keystone ================== Keystone provides authentication, authorization and service discovery mechanisms via HTTP primarily for use by projects in the OpenStack family. It is most commonly deployed as an HTTP interface to existing identity systems, such as LDAP. 
Developer documentation, the source of which is in ``doc/source/``, is published at: http://docs.openstack.org/developer/keystone/ The API specification and documentation are available at: http://specs.openstack.org/openstack/keystone-specs/ The canonical client library is available at: https://git.openstack.org/cgit/openstack/python-keystoneclient Documentation for cloud administrators is available at: http://docs.openstack.org/ The source of documentation for cloud administrators is available at: https://git.openstack.org/cgit/openstack/openstack-manuals Information about our team meeting is available at: https://wiki.openstack.org/wiki/Meetings/KeystoneMeeting Bugs and feature requests are tracked on Launchpad at: https://bugs.launchpad.net/keystone Future design work is tracked at: http://specs.openstack.org/openstack/keystone-specs/#identity-program-specifications Contributors are encouraged to join IRC (``#openstack-keystone`` on freenode): https://wiki.openstack.org/wiki/IRC For information on contributing to Keystone, see ``CONTRIBUTING.rst``. keystone-9.0.0/.mailmap0000664000567000056710000000264312701407102016142 0ustar jenkinsjenkins00000000000000# Format is: # # Joe Gordon Sirish Bitra sirish.bitra Sirish Bitra sirishbitra Sirish Bitra root Zhongyue Luo Chmouel Boudjnah Zhenguo Niu keystone-9.0.0/setup.py0000664000567000056710000000200412701407102016222 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=1.8'], pbr=True) keystone-9.0.0/test-requirements.txt0000664000567000056710000000222012701407105020754 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking<0.11,>=0.10.0 pep257==0.7.0 # MIT License flake8-docstrings==0.2.1.post1 # MIT bashate>=0.2 # Apache-2.0 os-testr>=0.4.1 # Apache-2.0 # computes code coverage percentages coverage>=3.6 # Apache-2.0 # fixture stubbing fixtures>=1.3.1 # Apache-2.0/BSD # xml parsing lxml>=2.3 # BSD # mock object framework mock>=1.2 # BSD oslotest>=1.10.0 # Apache-2.0 # required to build documentation sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD # test wsgi apps without starting an http server WebTest>=2.0 # MIT # mox was removed in favor of mock. We should not re-enable this module. See # discussion: http://lists.openstack.org/pipermail/openstack-dev/2013-July/012484.html #mox>=0.5.3 python-subunit>=0.0.18 # Apache-2.0/BSD testrepository>=0.0.18 # Apache-2.0/BSD testtools>=1.4.0 # MIT # For documentation oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 reno>=0.1.1 # Apache2 tempest-lib>=0.14.0 # Apache-2.0 # Functional tests. requests!=2.9.0,>=2.8.1 # Apache-2.0