diff --git a/.gitreview b/.gitreview index e8ebbd0e6a..cd290fcebd 100644 --- a/.gitreview +++ b/.gitreview @@ -2,3 +2,4 @@ host=review.opendev.org port=29418 project=openstack/keystone.git +defaultbranch=stable/2025.1 diff --git a/bindep.txt b/bindep.txt index 6d3d592fb5..68b3b745f5 100644 --- a/bindep.txt +++ b/bindep.txt @@ -37,3 +37,9 @@ openldap2-devel [platform:suse] # Required for sphinx graphviz image generation graphviz [test doc] + +# otherwise, it fails to build psycopg2, python-ldap +build-essential [test] + +# otherwise, it fails to recognize pbr version +git [test] diff --git a/concourse_unit_test_task b/concourse_unit_test_task new file mode 100755 index 0000000000..02f1eab48c --- /dev/null +++ b/concourse_unit_test_task @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -ex + +export DEBIAN_FRONTEND=noninteractive +apt-get update +apt-get install -y \ + python3-venv +python3 -m venv $HOME/.local/tools +source $HOME/.local/tools/bin/activate +pip install -U \ + pip \ + bindep \ + tox + +cd source +apt-get install -y \ + $(bindep -b test) +git config --global --add safe.directory /source + +export UPPER_CONSTRAINTS_FILE=https://raw.githubusercontent.com/sapcc/requirements/stable/2025.1-m3/upper-constraints.txt +export \ + KEYSTONE_IDENTITY_BACKEND=ldap \ + KEYSTONE_CLEAR_LDAP=yes \ + ENABLE_LDAP_LIVE_TEST=true +tox -e py312 \ + --skip-missing-interpreters=false diff --git a/custom-requirements.txt b/custom-requirements.txt new file mode 100644 index 0000000000..ef0002da7e --- /dev/null +++ b/custom-requirements.txt @@ -0,0 +1,9 @@ +python-binary-memcached + +git+https://github.com/sapcc/raven-python.git@ccloud#egg=raven[flask] +git+https://github.com/sapcc/openstack-watcher-middleware.git#egg=watcher-middleware +git+https://github.com/funkyHat/py-radius@install_requires#egg=py-radius +git+https://github.com/sapcc/keystone-extensions.git@stable/2025.1-m3#egg=keystone-extensions 
+git+https://github.com/sapcc/openstack-rate-limit-middleware.git#egg=rate-limit-middleware + +passlib \ No newline at end of file diff --git a/doc/source/getting-started/policy_mapping.rst b/doc/source/getting-started/policy_mapping.rst index a7cb27cfa7..fab79be12d 100644 --- a/doc/source/getting-started/policy_mapping.rst +++ b/doc/source/getting-started/policy_mapping.rst @@ -245,6 +245,8 @@ identity:delete_application_credential DELETE /v3/users/{use identity:get_access_rule GET /v3/users/{user_id}/access_rules/{access_rule_id} identity:list_access_rules GET /v3/users/{user_id}/access_rules identity:delete_access_rule DELETE /v3/users/{user_id}/access_rules/{access_rule_id} +identity:s3tokens_validate POST /v3/s3tokens +identity:ec2tokens_validate POST /v3/ec2tokens ========================================================= === diff --git a/keystone/api/_shared/authentication.py b/keystone/api/_shared/authentication.py index 6d58df2078..edbd48129c 100644 --- a/keystone/api/_shared/authentication.py +++ b/keystone/api/_shared/authentication.py @@ -56,7 +56,12 @@ def _check_and_set_default_scoping(auth_info, auth_context): default_project_id = user_ref.get('default_project_id') if not default_project_id: - # User has no default project. He shall get an unscoped token. + # User has no default project. They shall get an unscoped token. + msg = ( + "User %(user_id)s doesn't have a default project. " + " The token will be unscoped rather than scoped to a project." + ) + LOG.debug(msg, {'user_id': user_ref['id']}) return # make sure user's default project is legit before scoping to it @@ -226,7 +231,23 @@ def authenticate_for_token(auth=None): app_cred_id = None if 'application_credential' in method_names: token_auth = auth_info.auth['identity'] - app_cred_id = token_auth['application_credential']['id'] + try: + app_cred_id = token_auth['application_credential']['id'] + except KeyError: + # NOTE(bbobrov): see bug #1878438 on launchpad. 
+ # There is no any good fix for it, at least with the current + # architecture. Lets just return a nice message that this + # is currently not supported. + LOG.warning( + 'Unsupported reauthentication attempt with a token ' + 'after authenticating with application credentials' + ) + raise exception.Unauthorized( + _( + 'Cannot reauthenticate with a token issued with ' + 'application credentials' + ) + ) # Do MFA Rule Validation for the user if not core.UserMFARulesValidator.check_auth_methods_against_rules( diff --git a/keystone/api/ec2tokens.py b/keystone/api/ec2tokens.py index 152fe15a79..4fad969df2 100644 --- a/keystone/api/ec2tokens.py +++ b/keystone/api/ec2tokens.py @@ -21,6 +21,7 @@ from keystone.api._shared import EC2_S3_Resource from keystone.api._shared import json_home_relations +from keystone.common import rbac_enforcer from keystone.common import render_token from keystone.common import utils from keystone import exception @@ -30,6 +31,9 @@ CRED_TYPE_EC2 = 'ec2' +ENFORCER = rbac_enforcer.RBACEnforcer + + class EC2TokensResource(EC2_S3_Resource.ResourceBase): @staticmethod def _check_signature(creds_ref, credentials): @@ -57,12 +61,14 @@ def _check_signature(creds_ref, credentials): else: raise exception.Unauthorized(_('EC2 signature not supplied.')) - @ks_flask.unenforced_api def post(self): """Authenticate ec2 token. 
POST /v3/ec2tokens """ + # Enforce RBAC in the same way as S3 tokens + ENFORCER.enforce_call(action='identity:ec2tokens_validate') + token = self.handle_authenticate() token_reference = render_token.render_token_response_from_model(token) resp_body = jsonutils.dumps(token_reference) diff --git a/keystone/api/projects.py b/keystone/api/projects.py index 7d343c4972..1f46397988 100644 --- a/keystone/api/projects.py +++ b/keystone/api/projects.py @@ -21,6 +21,7 @@ from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer +from keystone.common import utils from keystone.common import validation as ks_validation import keystone.conf from keystone import exception @@ -189,7 +190,17 @@ def get(self): if t in flask.request.args: hints.add_filter(t, flask.request.args[t]) refs = PROVIDERS.resource_api.list_projects(hints=hints) - if self.oslo_context.domain_id: + + # the entries should not be filtered by domain_id if domain list was + # requested; otherwise all domains are getting filtered out + try: + is_domain_requested = utils.attr_as_boolean( + flask.request.args['is_domain'] + ) + except KeyError: + is_domain_requested = False + + if self.oslo_context.domain_id and not is_domain_requested: domain_id = self.oslo_context.domain_id filtered_refs = [ ref for ref in refs if ref['domain_id'] == domain_id diff --git a/keystone/api/s3tokens.py b/keystone/api/s3tokens.py index c9c8b895de..769a378f25 100644 --- a/keystone/api/s3tokens.py +++ b/keystone/api/s3tokens.py @@ -22,12 +22,15 @@ from keystone.api._shared import EC2_S3_Resource from keystone.api._shared import json_home_relations +from keystone.common import rbac_enforcer from keystone.common import render_token from keystone.common import utils from keystone import exception from keystone.i18n import _ from keystone.server import flask as ks_flask +ENFORCER = rbac_enforcer.RBACEnforcer + def _calculate_signature_v1(string_to_sign, secret_key): """Calculate a 
v1 signature. @@ -96,12 +99,14 @@ def _check_signature(creds_ref, credentials): message=_('Credential signature mismatch') ) - @ks_flask.unenforced_api def post(self): """Authenticate s3token. POST /v3/s3tokens """ + # Use standard Keystone policy enforcement for s3tokens access + ENFORCER.enforce_call(action='identity:s3tokens_validate') + token = self.handle_authenticate() token_reference = render_token.render_token_response_from_model(token) resp_body = jsonutils.dumps(token_reference) diff --git a/keystone/api/trusts.py b/keystone/api/trusts.py index 6a3f04e134..215c501256 100644 --- a/keystone/api/trusts.py +++ b/keystone/api/trusts.py @@ -224,7 +224,9 @@ def get(self): ) if not flask.request.args: # NOTE(morgan): Admin can list all trusts. - ENFORCER.enforce_call(action='admin_required') + # ENFORCER.enforce_call(action='admin_required') + # ccloud: only allow cloud-admins to list all trusts + ENFORCER.enforce_call(action='cloud_admin') if not flask.request.args: trusts += PROVIDERS.trust_api.list_trusts() diff --git a/keystone/api/users.py b/keystone/api/users.py index e428e82a06..35d3130b16 100644 --- a/keystone/api/users.py +++ b/keystone/api/users.py @@ -420,13 +420,14 @@ def post(self, user_id): POST /v3/users/{user_id}/credentials/OS-EC2 """ + tenant_id = self.request_body_json.get('tenant_id') + target = {} - target['credential'] = {'user_id': user_id} + target['credential'] = {'user_id': user_id, 'project_id': tenant_id} ENFORCER.enforce_call( action='identity:ec2_create_credential', target_attr=target ) PROVIDERS.identity_api.get_user(user_id) - tenant_id = self.request_body_json.get('tenant_id') PROVIDERS.resource_api.get_project(tenant_id) blob = { 'access': uuid.uuid4().hex, @@ -806,8 +807,12 @@ class UserAccessRuleListResource(ks_flask.ResourceBase): collection_key = 'access_rules' member_key = 'access_rule' - @validation.request_query_schema(app_cred_schema.index_request_query) - 
@validation.response_body_schema(app_cred_schema.rule_index_response_body) + @validation.request_query_schema( + app_cred_schema.access_rule_index_request_query + ) + @validation.response_body_schema( + app_cred_schema.access_rule_index_response_body + ) def get(self, user_id): """List access rules for user. @@ -830,8 +835,12 @@ class UserAccessRuleGetDeleteResource(ks_flask.ResourceBase): collection_key = 'access_rules' member_key = 'access_rule' - @validation.request_query_schema(app_cred_schema.rule_show_request_query) - @validation.response_body_schema(app_cred_schema.rule_show_response_body) + @validation.request_query_schema( + app_cred_schema.access_rule_show_request_query + ) + @validation.response_body_schema( + app_cred_schema.access_rule_show_response_body + ) def get(self, user_id, access_rule_id): """Get access rule resource. diff --git a/keystone/api/validation/__init__.py b/keystone/api/validation/__init__.py index 7b39ac2f99..827da56a8d 100644 --- a/keystone/api/validation/__init__.py +++ b/keystone/api/validation/__init__.py @@ -16,9 +16,15 @@ import typing as ty import flask +from oslo_log import log from oslo_serialization import jsonutils from keystone.api.validation import validators +import keystone.conf +from keystone import exception + +CONF = keystone.conf.CONF +LOG = log.getLogger(__name__) def validated(cls): @@ -138,26 +144,40 @@ def add_validator(func): def wrapper(*args, **kwargs): response = func(*args, **kwargs) - if schema is not None: - # In Flask it is not uncommon that the method return a tuple of - # body and the status code. In the runtime Keystone only return - # a body, but some of the used testtools do return a tuple. - if isinstance(response, tuple): - _body = response[0] - else: - _body = response - - # NOTE(stephenfin): If our response is an object, we need to - # serializer and deserialize to convert e.g. 
date-time - # to strings - _body = jsonutils.dump_as_bytes(_body) - - if _body == b"": - body = None + if CONF.api.response_validation == 'ignore': + # don't waste our time checking anything if we're ignoring + # schema errors + return response + + if schema is None: + return response + + # In Flask it is not uncommon that the method return a tuple of + # body and the status code. In the runtime Keystone only return + # a body, but some of the used testtools do return a tuple. + if isinstance(response, tuple): + _body = response[0] + else: + _body = response + + # NOTE(stephenfin): If our response is an object, we need to + # serializer and deserialize to convert e.g. date-time + # to strings + _body = jsonutils.dump_as_bytes(_body) + + if _body == b'': + body = None + else: + body = jsonutils.loads(_body) + + try: + _schema_validator(schema, body, args, kwargs, is_body=True) + except exception.SchemaValidationError: + if CONF.api.response_validation == 'warn': + LOG.exception('Schema failed to validate') else: - body = jsonutils.loads(_body) + raise - _schema_validator(schema, body, args, kwargs, is_body=True) return response wrapper._response_body_schema = schema diff --git a/keystone/application_credential/schema.py b/keystone/application_credential/schema.py index 886d567b52..04ef913830 100644 --- a/keystone/application_credential/schema.py +++ b/keystone/application_credential/schema.py @@ -65,16 +65,43 @@ "additionalProperties": False, } -# Query parameters of the `/users/{user_id}/access_rules` and -# `/application_credentials/{application_credential_id}` APIs -index_request_query: dict[str, Any] = { +# Query parameters of the `/users/{user_id}/access_rules` API +access_rule_index_request_query: dict[str, Any] = { "type": "object", - "properties": {}, - "additionalProperties": False, + "properties": { + # NOTE(stephenfin): We don't reuse _access_rules_properties since these + # filters are currently far less strict that the request body of the + # POST and 
PATCH operations. We may wish to change this in a future + # version. + "service": { + "type": "string", + "description": ( + "The service type identifier for the service that the " + "application is permitted to access." + ), + }, + "path": { + "type": "string", + "description": ( + "The API path that the application credential is " + "permitted to access." + ), + }, + "method": { + "type": "string", + "description": ( + "The request method that the application credential is " + "permitted to use for a given API endpoint." + ), + }, + }, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (name__icontains) support. + "additionalProperties": True, } # Response of the `/access_rules` API -rule_index_response_body: dict[str, Any] = { +access_rule_index_response_body: dict[str, Any] = { "type": "object", "properties": { "access_rules": { @@ -89,15 +116,17 @@ # /access/rules/{access_rule_id} # GET request query parameters -rule_show_request_query: dict[str, Any] = { +access_rule_show_request_query: dict[str, Any] = { "type": "object", "properties": {}, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. + "additionalProperties": True, } # Response of `/access_rules/{access_rule_id}` API returning # single access rule -rule_show_response_body: dict[str, Any] = { +access_rule_show_response_body: dict[str, Any] = { "type": "object", "description": "An access rule object.", "properties": {"access_rule": access_rule_schema}, @@ -216,7 +245,9 @@ ), } }, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (name__icontains) support. 
+ "additionalProperties": True, } # Response of the `/application_credentials` API diff --git a/keystone/assignment/core.py b/keystone/assignment/core.py index 6fa4c9c971..bbc12eefde 100644 --- a/keystone/assignment/core.py +++ b/keystone/assignment/core.py @@ -15,20 +15,24 @@ """Main entry point into the Assignment service.""" import copy +import datetime import itertools from oslo_log import log +from oslo_utils import timeutils from keystone.common import cache from keystone.common import driver_hints from keystone.common import manager from keystone.common import provider_api from keystone.common.resource_options import options as ro_opt -import keystone.conf from keystone import exception from keystone.i18n import _ +from keystone.models import revoke_model from keystone import notifications +import keystone.conf + CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs @@ -1293,6 +1297,32 @@ def delete_user_assignments(self, user_id): self.delete_system_grant_for_user(user_id, assignment['id']) COMPUTED_ASSIGNMENTS_REGION.invalidate() + def invalidate_user_cache_on_group_change(self, user_id): + """Invalidate user cache when group membership changes.""" + LOG.debug( + 'Revoking tokens for federated user %(user_id)s due to group ' + 'membership changes in the identity provider', + {'user_id': user_id}, + ) + + COMPUTED_ASSIGNMENTS_REGION.invalidate() + + # Create revocation event with -1 second to avoid race condition + # where a new token issued at the same second as the revocation + # event would be incorrectly revoked. This ensures only tokens + # issued before the group membership change are revoked. 
+ issued_before = timeutils.utcnow().replace( + microsecond=0 + ) - datetime.timedelta(seconds=1) + PROVIDERS.revoke_api.revoke( + revoke_model.RevokeEvent( + user_id=user_id, issued_before=issued_before + ) + ) + + reason = f'User {user_id} group membership changed' + notifications.invalidate_token_cache_notification(reason) + def check_system_grant_for_user(self, user_id, role_id): """Check if a user has a specific role on the system. diff --git a/keystone/assignment/schema.py b/keystone/assignment/schema.py index 55b9f41015..8479b8c463 100644 --- a/keystone/assignment/schema.py +++ b/keystone/assignment/schema.py @@ -61,7 +61,9 @@ "name": parameter_types.name, "domain_id": parameter_types.domain_id, }, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (name__icontains) support. + "additionalProperties": True, } # Response body of the `GET /roles` API operation @@ -399,7 +401,9 @@ ] }, "not": {"required": ["effective", "group"]}, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (user.id__icontains) support. + "additionalProperties": True, } # Response body of the `GET /role_assignments` API operation diff --git a/keystone/catalog/schema.py b/keystone/catalog/schema.py index 418ecfe443..f0451c9ce0 100644 --- a/keystone/catalog/schema.py +++ b/keystone/catalog/schema.py @@ -98,6 +98,8 @@ service_index_request_query: dict[str, Any] = { "type": "object", "properties": {}, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. "additionalProperties": True, } @@ -254,7 +256,10 @@ "endpoint belongs", }, }, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (interface__icontains) + # support. 
+ "additionalProperties": True, } # Response of the `/endpoints` API @@ -275,7 +280,9 @@ endpoint_request_query: dict[str, Any] = { "type": "object", "properties": {}, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. + "additionalProperties": True, } # Response of the `/endpoints` API returning a single endpoint @@ -403,6 +410,8 @@ "description": "The name of the endpoint group.", } }, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (name__icontains) support. "additionalProperties": False, } diff --git a/keystone/cmd/cli.py b/keystone/cmd/cli.py index 93bf024144..8982ef5102 100644 --- a/keystone/cmd/cli.py +++ b/keystone/cmd/cli.py @@ -439,7 +439,7 @@ def check_db_sync_status(cls): except db_exception.DBMigrationError: contract_version = None - heads = upgrades.get_current_heads() + heads = upgrades.get_head_revisions() if ( upgrades.EXPAND_BRANCH not in heads diff --git a/keystone/cmd/doctor/tokens.py b/keystone/cmd/doctor/tokens.py index 547e13ec7c..cd62a22808 100644 --- a/keystone/cmd/doctor/tokens.py +++ b/keystone/cmd/doctor/tokens.py @@ -31,4 +31,6 @@ def symptom_unreasonable_max_token_size(): depending on the IDs returned from LDAP, resulting in longer Fernet tokens (adjust your `max_token_size` accordingly). """ - return 'fernet' in CONF.token.provider and CONF.max_token_size > 255 + # return 'fernet' in CONF.token.provider and CONF.max_token_size > 255 + # CCloud: we are using ldap, hence the max_token_size needs to go up. 
+ return 'fernet' in CONF.token.provider and CONF.max_token_size > 268 diff --git a/keystone/common/fernet_utils.py b/keystone/common/fernet_utils.py index 36dd70671b..b3f051fe91 100644 --- a/keystone/common/fernet_utils.py +++ b/keystone/common/fernet_utils.py @@ -12,7 +12,7 @@ import base64 import os -import stat +# import stat from cryptography import fernet from oslo_log import log @@ -61,16 +61,18 @@ def validate_key_repository(self, requires_write=False): 'config_group': self.config_group, }, ) - else: - # ensure the key repository isn't world-readable - stat_info = os.stat(self.key_repository) - if ( - stat_info.st_mode & stat.S_IROTH - or stat_info.st_mode & stat.S_IXOTH - ): - LOG.warning( - 'key_repository is world readable: %s', self.key_repository - ) + # CCloud: we don't care, since k8s volumes from secrets can't be + # configured to be read-only + # else: + # # ensure the key repository isn't world-readable + # stat_info = os.stat(self.key_repository) + # if ( + # stat_info.st_mode & stat.S_IROTH + # or stat_info.st_mode & stat.S_IXOTH + # ): + # LOG.warning( + # 'key_repository is world readable: %s', self.key_repository + # ) return is_valid diff --git a/keystone/common/password_hashing.py b/keystone/common/password_hashing.py index 1554f4ecd9..290f920f14 100644 --- a/keystone/common/password_hashing.py +++ b/keystone/common/password_hashing.py @@ -13,9 +13,12 @@ # License for the specific language governing permissions and limitations # under the License. +import base64 +import hmac import itertools from oslo_log import log +import passlib.hash from keystone.common import password_hashers from keystone.common.password_hashers import bcrypt @@ -34,7 +37,11 @@ ) ) -_HASHER_NAME_MAP = {hasher.name: hasher for hasher in SUPPORTED_HASHERS} +DEPRECATED_HASHERS = frozenset([passlib.hash.sha512_crypt]) + +_HASHER_NAME_MAP = { + hasher.name: hasher for hasher in SUPPORTED_HASHERS | DEPRECATED_HASHERS +} # NOTE(notmorgan): Build the list of prefixes. 
This comprehension builds @@ -61,7 +68,9 @@ def _get_hash_ident(hashers): for module, prefix in itertools.chain( *[ zip([mod] * len(ident), ident) - for mod, ident in _get_hash_ident(SUPPORTED_HASHERS) + for mod, ident in _get_hash_ident( + SUPPORTED_HASHERS | DEPRECATED_HASHERS + ) ] ) } @@ -132,6 +141,11 @@ def check_password(password: str, hashed: str) -> bool: return hasher.verify(password_utf8, hashed) +def is_deprecated_hash(hashed): + """Check if the password is using a deprecated hash.""" + return _get_hasher_from_ident(hashed) in DEPRECATED_HASHERS + + def hash_user_password(user): """Hash a user dict's password without modifying the passed-in dict.""" password = user.get('password') @@ -178,3 +192,35 @@ def hash_password(password: str) -> str: _('Password Hash Algorithm %s not implemented') % CONF.identity.password_hash_algorithm ) + + +def generate_partial_password_hash(password: str, salt: str) -> str: + """Generates partial password hash for reporting purposes. + + The generated password hash is base64 encoded, and `max_chars` of it are + returned. 
+ """ + secret_key = CONF.security_compliance.invalid_password_hash_secret_key + if secret_key is None: + raise RuntimeError(_('Secret Key value has to be provided')) + + hash_function = CONF.security_compliance.invalid_password_hash_function + + salted = hmac.digest( + key=bytes(salt, "utf-8"), + msg=bytes(password, "utf-8"), + digest=hash_function, + ) + + peppered = hmac.digest( + key=bytes(secret_key, "utf-8"), msg=salted, digest=hash_function + ) + + # encode to utilize more characters to reduce collisions when further + # truncating + encoded = base64.b64encode(peppered).decode("utf-8").rstrip("=") + + max_chars = CONF.security_compliance.invalid_password_hash_max_chars + if max_chars is None: + return encoded + return encoded[:max_chars] diff --git a/keystone/common/policies/__init__.py b/keystone/common/policies/__init__.py index 02608c185a..68842b50ce 100644 --- a/keystone/common/policies/__init__.py +++ b/keystone/common/policies/__init__.py @@ -22,6 +22,7 @@ from keystone.common.policies import domain from keystone.common.policies import domain_config from keystone.common.policies import ec2_credential +from keystone.common.policies import ec2tokens from keystone.common.policies import endpoint from keystone.common.policies import endpoint_group from keystone.common.policies import grant @@ -40,6 +41,7 @@ from keystone.common.policies import revoke_event from keystone.common.policies import role from keystone.common.policies import role_assignment +from keystone.common.policies import s3tokens from keystone.common.policies import service from keystone.common.policies import service_provider from keystone.common.policies import token @@ -78,6 +80,8 @@ def list_rules(): revoke_event.list_rules(), role.list_rules(), role_assignment.list_rules(), + s3tokens.list_rules(), + ec2tokens.list_rules(), service.list_rules(), service_provider.list_rules(), token_revocation.list_rules(), diff --git a/keystone/common/policies/base.py b/keystone/common/policies/base.py 
index 97970e4e03..2fded16069 100644 --- a/keystone/common/policies/base.py +++ b/keystone/common/policies/base.py @@ -76,6 +76,9 @@ ) rules = [ + policy.RuleDefault( + name='cloud_admin', check_str='role:admin and is_admin_project:True' + ), policy.RuleDefault( name='admin_required', check_str='role:admin or is_admin:1' ), diff --git a/keystone/common/policies/ec2tokens.py b/keystone/common/policies/ec2tokens.py new file mode 100644 index 0000000000..7f5f4e21f4 --- /dev/null +++ b/keystone/common/policies/ec2tokens.py @@ -0,0 +1,34 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_policy import policy + +from keystone.common.policies import base + +# Align EC2 tokens API with S3 tokens: require admin or service users +ADMIN_OR_SERVICE = 'rule:service_or_admin' + + +ec2tokens_policies = [ + policy.DocumentedRuleDefault( + name=base.IDENTITY % 'ec2tokens_validate', + check_str=ADMIN_OR_SERVICE, + scope_types=['system', 'domain', 'project'], + description='Validate EC2 credentials and create a Keystone token. 
' + 'Restricted to service users or administrators.', + operations=[{'path': '/v3/ec2tokens', 'method': 'POST'}], + ) +] + + +def list_rules(): + return ec2tokens_policies diff --git a/keystone/common/policies/s3tokens.py b/keystone/common/policies/s3tokens.py new file mode 100644 index 0000000000..96088f62e2 --- /dev/null +++ b/keystone/common/policies/s3tokens.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_policy import policy + +from keystone.common.policies import base + +# S3 tokens API requires service authentication to prevent presigned URL exploitation +# This policy restricts access to service users or administrators only +ADMIN_OR_SERVICE = 'rule:service_or_admin' + +s3tokens_policies = [ + policy.DocumentedRuleDefault( + name=base.IDENTITY % 's3tokens_validate', + check_str=ADMIN_OR_SERVICE, + scope_types=['system', 'domain', 'project'], + description='Validate S3 credentials and create a Keystone token. 
' + 'Restricted to service users or administrators to prevent ' + 'exploitation via presigned URLs.', + operations=[{'path': '/v3/s3tokens', 'method': 'POST'}], + ) +] + + +def list_rules(): + return s3tokens_policies diff --git a/keystone/common/render_token.py b/keystone/common/render_token.py index dbaab32c4b..06d511830f 100644 --- a/keystone/common/render_token.py +++ b/keystone/common/render_token.py @@ -94,6 +94,19 @@ def render_token_response_from_model(token, include_catalog=True): and ap_domain_name == token.project_domain['name'] ) token_reference['token']['is_admin_project'] = is_ap + # ccloud: additional bootstrap project support + bootstrap_name = CONF.resource.bootstrap_project_name + bootstrap_domain_name = CONF.resource.bootstrap_project_domain_name + if ( + bootstrap_name + and bootstrap_domain_name + and not token_reference['token'].get('is_admin_project', False) + ): + is_ap = ( + token.project['name'] == bootstrap_name + and bootstrap_domain_name == token.project_domain['name'] + ) + token_reference['token']['is_admin_project'] = is_ap if include_catalog and not token.unscoped: user_id = token.user_id if token.trust_id: diff --git a/keystone/common/sql/upgrades.py b/keystone/common/sql/upgrades.py index acfb5b7af3..b18373563d 100644 --- a/keystone/common/sql/upgrades.py +++ b/keystone/common/sql/upgrades.py @@ -19,6 +19,7 @@ from alembic import command as alembic_api from alembic import config as alembic_config from alembic import migration as alembic_migration +from alembic.runtime import environment as alembic_environment from alembic import script as alembic_script from oslo_db import exception as db_exception from oslo_log import log as logging @@ -105,6 +106,11 @@ def _get_current_heads(engine, config): context = alembic_migration.MigrationContext.configure(conn) heads = context.get_current_heads() + heads_map = _get_head_maps(heads, script) + return heads_map + + +def _get_head_maps(heads, script): heads_map = {} for head in heads: @@ 
-142,6 +148,17 @@ def get_current_heads(): return heads +def get_head_revisions(): + """Get the available head for each the expand and contract branches.""" + config = _find_alembic_conf() + script = alembic_script.ScriptDirectory.from_config(config) + context = alembic_environment.EnvironmentContext(config, script) + heads = context.get_head_revisions() + heads_map = _get_head_maps(heads, script) + + return heads_map + + def _is_database_under_alembic_control(engine): with engine.connect() as conn: context = alembic_migration.MigrationContext.configure(conn) diff --git a/keystone/conf/__init__.py b/keystone/conf/__init__.py index e7e7a062db..edaa557afd 100644 --- a/keystone/conf/__init__.py +++ b/keystone/conf/__init__.py @@ -19,6 +19,7 @@ from oslo_middleware import cors from osprofiler import opts as profiler +from keystone.conf import api from keystone.conf import application_credential from keystone.conf import assignment from keystone.conf import auth @@ -54,8 +55,8 @@ CONF = cfg.CONF - conf_modules = [ + api, application_credential, assignment, auth, @@ -90,7 +91,6 @@ wsgi, ] - oslo_messaging.set_transport_defaults(control_exchange='keystone') diff --git a/keystone/conf/api.py b/keystone/conf/api.py new file mode 100644 index 0000000000..b9690fc9e4 --- /dev/null +++ b/keystone/conf/api.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg + +from keystone.conf import utils + +GROUP_NAME = __name__.split('.')[-1] + +ALL_OPTS = [ + cfg.StrOpt( + "response_validation", + choices=( + ( + 'error', + 'Raise a HTTP 500 (Server Error) for responses that fail ' + 'schema validation', + ), + ( + 'warn', + 'Log a warning for responses that fail schema validation', + ), + ('ignore', 'Ignore schema validation failures'), + ), + default='warn', + help=utils.fmt( + """ +Configure validation of API responses. + +``warn`` is the current recommendation for production environments. If you find +it necessary to enable the ``ignore`` option, please report the issues you are +seeing to the Keystone team so we can improve our schemas. + +``error`` should not be used in a production environment. This is because +schema validation happens *after* the response body has been generated, meaning +any side effects will still happen and the call may be non-idempotent despite +the user receiving a HTTP 500 error. +""" + ), + ) +] + + +def register_opts(conf): + conf.register_opts(ALL_OPTS, group=GROUP_NAME) + + +def list_opts(): + return {GROUP_NAME: ALL_OPTS} diff --git a/keystone/conf/default.py b/keystone/conf/default.py index 145799d39c..035317f816 100644 --- a/keystone/conf/default.py +++ b/keystone/conf/default.py @@ -147,6 +147,18 @@ ), ) +default_tag = cfg.MultiStrOpt( + 'default_tag', + default=[], + help=utils.fmt(""" +Default `tag`(s) for newly created projects. If left undefined, Keystone will +create projects without an initial tag. 
This field can be set multiple times in +order to set multiple default tags, for example: +default_tag=tag_0 +default_tag=tag_1 +"""), +) + notification_format = cfg.StrOpt( 'notification_format', default='cadf', @@ -195,6 +207,7 @@ strict_password_check, insecure_debug, default_publisher_id, + default_tag, notification_format, notification_opt_out, ] diff --git a/keystone/conf/resource.py b/keystone/conf/resource.py index 659cd339ab..471ae190ad 100644 --- a/keystone/conf/resource.py +++ b/keystone/conf/resource.py @@ -85,6 +85,29 @@ ), ) +# CCloud: added bootstrap cloud-admin project & domain +bootstrap_project_domain_name = cfg.StrOpt( + 'bootstrap_project_domain_name', + help=utils.fmt(""" +Name of the domain that owns the `bootstrap_project_name`. If left unset, then +there is no admin project. `[resource] bootstrap_project_name` must also be set +to use this option. +"""), +) + +bootstrap_project_name = cfg.StrOpt( + 'bootstrap_project_name', + help=utils.fmt(""" +This is a special project which represents cloud-level administrator privileges +across services. Tokens scoped to this project will contain a true +`is_admin_project` attribute to indicate to policy systems that the role +assignments on that specific project should apply equally across every project. +If left unset, then there is no admin project, and thus no explicit means of +cross-project role assignments. `[resource] bootstrap_project_domain_name` must +also be set to use this option. 
+"""), +) + project_name_url_safe = cfg.StrOpt( 'project_name_url_safe', choices=['off', 'new', 'strict'], @@ -117,6 +140,7 @@ GROUP_NAME = __name__.split('.')[-1] +# CCloud: added bootstrap cloud-admin project & domain ALL_OPTS = [ driver, caching, @@ -124,6 +148,8 @@ list_limit, admin_project_domain_name, admin_project_name, + bootstrap_project_domain_name, + bootstrap_project_name, project_name_url_safe, domain_name_url_safe, ] diff --git a/keystone/conf/security_compliance.py b/keystone/conf/security_compliance.py index 50bf809164..4e075a1bbc 100644 --- a/keystone/conf/security_compliance.py +++ b/keystone/conf/security_compliance.py @@ -150,6 +150,79 @@ ), ) +report_invalid_password_hash = cfg.ListOpt( + 'report_invalid_password_hash', + default="", + sample_default="event", + item_type=cfg.types.String( + choices=[ + ( + "event", + utils.fmt(""" +Enriches `identity.authenticate.failure` event notifications with partial +invalid password hash +"""), + ) + # ("log", "description"), + ] + ), + help=utils.fmt( + """ +When configured, enriches the corresponding output channel with hash of invalid +password, which could be further used to distinguish bruteforce attacks from +e.g. external user automations that did not timely update rotated password by +analyzing variability of the hash value. +Additional configuration parameters are available using other +`invalid_password_hash_*` configuration entires, that only take effect when +this option is activated. +""" + ), +) + +invalid_password_hash_secret_key = cfg.StrOpt( + 'invalid_password_hash_secret_key', + secret=True, + help=utils.fmt( + """ +If `report_invalid_password_hash` is configured, uses provided secret key when +generating password hashes to make them unique and distinct from any other +Keystone installations out there. 
Should be some secret static value specific +to the current installation (the same value should be used in distributed +installations working with the same backend, to make them all generate equal +hashes for equal invalid passwords). 16 bytes (128 bits) or more is +recommended. +""" + ), +) + +invalid_password_hash_function = cfg.StrOpt( + 'invalid_password_hash_function', + default='sha256', + help=utils.fmt( + """ +If `report_invalid_password_hash` is configured, defines the hash function to +be used by HMAC. Possible values are names suitable to hashlib.new() - +https://docs.python.org/3/library/hashlib.html#hashlib.new. +""" + ), +) + +invalid_password_hash_max_chars = cfg.IntOpt( + 'invalid_password_hash_max_chars', + min=1, + sample_default=5, + help=utils.fmt( + """ +If `report_invalid_password_hash` is configured, defines the number of +characters of hash of invalid password to be returned. When not specified, +returns full hash. Its length depends on implementation and +`invalid_password_hash_function` configuration, but is typically 16+ +characters. It's recommended to use the least reasonable value however - it's +the most effective measure to protect the hashes. 
+""" + ), +) + GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ @@ -162,6 +235,10 @@ password_regex, password_regex_description, change_password_upon_first_use, + report_invalid_password_hash, + invalid_password_hash_secret_key, + invalid_password_hash_function, + invalid_password_hash_max_chars, ] diff --git a/keystone/credential/schema.py b/keystone/credential/schema.py index 274fa57cda..75ffd631d1 100644 --- a/keystone/credential/schema.py +++ b/keystone/credential/schema.py @@ -32,7 +32,6 @@ }, "user_id": { "type": "string", - "format": "uuid", "description": "The ID of the user who owns the credential.", }, } @@ -59,7 +58,6 @@ "properties": { "user_id": { "type": "string", - "format": "uuid", "description": "Filters the response by a user ID.", }, "type": { @@ -70,6 +68,9 @@ ), }, }, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (type__icontains) support. + "additionalProperties": True, } # Response of the `/credentials` API diff --git a/keystone/exception.py b/keystone/exception.py index 0eb209b9b0..707749aecd 100644 --- a/keystone/exception.py +++ b/keystone/exception.py @@ -70,6 +70,9 @@ class Error(Exception, metaclass=_KeystoneExceptionMeta): code: ty.Optional[int] = None title: ty.Optional[str] = None message_format: ty.Optional[str] = None + # ccloud: keep the original message for logging, regardless of + # insecure_debug setting + message = None def __init__(self, message=None, **kwargs): try: @@ -297,6 +300,11 @@ def __deepcopy__(self): def _build_message(self, message, **kwargs): """Only returns detailed messages in insecure_debug mode.""" + # ccloud: always keep original message for logging, regardless of + # insecure_debug setting + if message: + self.message = message + if message and CONF.insecure_debug: if isinstance(message, str): # Only do replacement if message is string. 
The message is diff --git a/keystone/federation/schema.py b/keystone/federation/schema.py index 3a47e134f7..c0fa8f6e8b 100644 --- a/keystone/federation/schema.py +++ b/keystone/federation/schema.py @@ -96,7 +96,9 @@ "description": "Whether the service provider is enabled or not", }, }, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (id__icontains) support. + "additionalProperties": True, } service_provider_index_response_body: dict[str, Any] = { @@ -211,6 +213,8 @@ "description": "Whether the identity provider is enabled or not", }, }, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (name__icontains) support. "additionalProperties": True, } @@ -230,6 +234,8 @@ identity_provider_request_query: dict[str, Any] = { "type": "object", "properties": {}, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. 
"additionalProperties": True, } diff --git a/keystone/identity/backends/ldap/common.py b/keystone/identity/backends/ldap/common.py index 2212543872..4bc6c461af 100644 --- a/keystone/identity/backends/ldap/common.py +++ b/keystone/identity/backends/ldap/common.py @@ -28,8 +28,8 @@ from oslo_log import log from oslo_utils import reflection -from keystone.common import driver_hints from keystone import exception +from keystone.common import driver_hints from keystone.i18n import _ from keystone.identity.backends.ldap import models @@ -1244,7 +1244,8 @@ def _paged_search_s(self, base, scope, filterstr, attrlist=None): rtype, rdata, rmsgid, serverctrls = self.conn.result3(message) # Receive the data res.extend(rdata) - pctrls = [c for c in serverctrls if c.controlType == page_ctrl_oid] + # Handle case where serverctrls is None (e.g., in fake LDAP) + pctrls = [c for c in (serverctrls or []) if c.controlType == page_ctrl_oid] if pctrls: # LDAP server supports pagination if use_old_paging_api: @@ -1417,6 +1418,12 @@ def __init__(self, conf): self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime if self.options_name is not None: + self.user_tree_dn = ( + conf.ldap.user_tree_dn + or f'{self.DEFAULT_OU},{conf.ldap.suffix}' + ) + self.user_objectclass = conf.ldap.user_objectclass + self.tree_dn = ( getattr(conf.ldap, f'{self.options_name}_tree_dn') or f'{self.DEFAULT_OU},{conf.ldap.suffix}' @@ -1823,7 +1830,6 @@ def _ldap_get_all(self, hints, ldap_filter=None): self.object_class, self.id_attr, ) - sizelimit = 0 attrs = list( set( [self.id_attr] @@ -1831,19 +1837,13 @@ def _ldap_get_all(self, hints, ldap_filter=None): + list(self.extra_attr_mapping.keys()) ) ) - if hints.limit: - sizelimit = hints.limit['limit'] - res = self._ldap_get_limited( - self.tree_dn, self.LDAP_SCOPE, query, attrs, sizelimit - ) - else: - with self.get_connection() as conn: - try: - res = conn.search_s( - self.tree_dn, self.LDAP_SCOPE, query, attrs - ) - except ldap.NO_SUCH_OBJECT: - 
return [] + with self.get_connection() as conn: + try: + res = conn.search_s( + self.tree_dn, self.LDAP_SCOPE, query, attrs + ) + except ldap.NO_SUCH_OBJECT: + return [] # TODO(prashkre): add functional testing for missing name attribute # on ldap entities. # NOTE(prashkre): Filter ldap search result to keep keystone away from @@ -1854,9 +1854,14 @@ def _ldap_get_all(self, hints, ldap_filter=None): return self._filter_ldap_result_by_attr(res, 'name') def _ldap_get_list( - self, search_base, scope, query_params=None, attrlist=None + self, + search_base, + scope, + query_params=None, + attrlist=None, + object_class=None, ): - query = f'(objectClass={self.object_class})' + query = f'(objectClass={object_class or self.object_class})' if query_params: def calc_filter(attrname, value): diff --git a/keystone/identity/backends/ldap/core.py b/keystone/identity/backends/ldap/core.py index 75251234ba..5ddf14d064 100644 --- a/keystone/identity/backends/ldap/core.py +++ b/keystone/identity/backends/ldap/core.py @@ -434,23 +434,26 @@ def list_group_users(self, group_id): """Return a list of user dns which are members of a group.""" group_ref = self.get(group_id) group_dn = group_ref['dn'] + attribute = self.member_attribute try: if self.group_ad_nesting: # NOTE(ayoung): LDAP_SCOPE is used here instead of hard- # coding to SCOPE_SUBTREE to get through the unit tests. # However, it is also probably more correct. 
+ attribute = "distinguishedName" attrs = self._ldap_get_list( - self.tree_dn, + self.user_tree_dn, self.LDAP_SCOPE, query_params={ - f"member:{LDAP_MATCHING_RULE_IN_CHAIN}:": group_dn + f"memberOf:{LDAP_MATCHING_RULE_IN_CHAIN}:": group_dn }, - attrlist=[self.member_attribute], + attrlist=[attribute], + object_class=self.user_objectclass, ) else: attrs = self._ldap_get_list( - group_dn, ldap.SCOPE_BASE, attrlist=[self.member_attribute] + group_dn, ldap.SCOPE_BASE, attrlist=[attribute] ) except ldap.NO_SUCH_OBJECT: @@ -458,7 +461,7 @@ def list_group_users(self, group_id): users = [] for dn, member in attrs: - user_dns = member.get(self.member_attribute, []) + user_dns = member.get(attribute, []) for user_dn in user_dns: users.append(user_dn) return users diff --git a/keystone/identity/backends/sql.py b/keystone/identity/backends/sql.py index cea1c2aec4..f030bd5100 100644 --- a/keystone/identity/backends/sql.py +++ b/keystone/identity/backends/sql.py @@ -76,6 +76,8 @@ def authenticate(self, user_id, password): raise exception.UserDisabled(user_id=user_id) elif user_ref.password_is_expired: raise exception.PasswordExpired(user_id=user_id) + if password_hashing.is_deprecated_hash(user_ref.password): + self._rehash_password(user_id, password) # successful auth, reset failed count if present if user_ref.local_user.failed_auth_count: self._reset_failed_auth(user_id) @@ -127,6 +129,13 @@ def _reset_failed_auth(self, user_id): user_ref.local_user.failed_auth_count = 0 user_ref.local_user.failed_auth_at = None + def _rehash_password(self, user_id, password): + with sql.session_for_write() as session: + user_ref = session.get(model.User, user_id) + user_ref.password_ref.password_hash = ( + password_hashing.hash_password(password) + ) + # user crud @sql.handle_conflicts(conflict_type='user') diff --git a/keystone/identity/core.py b/keystone/identity/core.py index fd1b114902..031265dc4e 100644 --- a/keystone/identity/core.py +++ b/keystone/identity/core.py @@ -34,12 +34,13 @@ 
from keystone.common import manager from keystone.common import provider_api from keystone.common.validation import validators -import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.identity.mapping_backends import mapping from keystone import notifications +import keystone.conf + CONF = keystone.conf.CONF LOG = log.getLogger(__name__) @@ -1755,18 +1756,55 @@ def shadow_federated_user(self, idp_id, protocol_id, user, group_ids=None): :returns: dictionary of the mapped User entity """ + user_already_existed = True + try: + PROVIDERS.shadow_users_api.get_federated_user( + idp_id, protocol_id, user['id'] + ) + except exception.UserNotFound: + user_already_existed = False + user_dict = self._shadow_federated_user(idp_id, protocol_id, user) # Note(knikolla): The shadowing operation can be cached, # however we need to update the expiring group memberships. - if group_ids: - for group_id in group_ids: - LOG.info( - "Adding user [%s] to group [%s].", user_dict, group_id - ) - PROVIDERS.shadow_users_api.add_user_to_group_expires( - user_dict['id'], group_id - ) + + if group_ids is None: + group_ids = [] + + membership_changed = False + + for group_id in group_ids: + LOG.info("Adding user [%s] to group [%s].", user_dict, group_id) + if PROVIDERS.shadow_users_api.add_user_to_group_expires( + user_dict['id'], group_id + ): + membership_changed = True + + removed_group_ids = ( + PROVIDERS.shadow_users_api.cleanup_stale_group_memberships( + user_dict['id'], idp_id, group_ids + ) + ) + + if removed_group_ids: + LOG.debug( + 'User %s was removed from groups %s, marking as membership changed', + user_dict['id'], + removed_group_ids, + ) + membership_changed = True + + if membership_changed and user_already_existed: + LOG.debug( + 'Group membership changed for federated user %s, ' + 'revoking tokens', + user_dict['id'], + ) + PROVIDERS.assignment_api.invalidate_user_cache_on_group_change( + user_dict['id'] + ) + return user_dict diff --git 
a/keystone/identity/schema.py b/keystone/identity/schema.py index 3edb238da2..f8688baae8 100644 --- a/keystone/identity/schema.py +++ b/keystone/identity/schema.py @@ -81,6 +81,8 @@ "sort_key": parameter_types.sort_key, "sort_dir": parameter_types.sort_dir, }, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (name__icontains) support. "additionalProperties": True, } @@ -206,7 +208,9 @@ "sort_key": parameter_types.sort_key, "sort_dir": parameter_types.sort_dir, }, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (name__icontains) support. + "additionalProperties": True, } _group_properties: dict[str, Any] = { diff --git a/keystone/identity/shadow_backends/sql.py b/keystone/identity/shadow_backends/sql.py index a79c0687e1..3e2ca0149e 100644 --- a/keystone/identity/shadow_backends/sql.py +++ b/keystone/identity/shadow_backends/sql.py @@ -251,8 +251,11 @@ def get_federated_user(): membership = query.first() if membership: + # Existing membership, just refresh TTL membership.last_verified = timeutils.utcnow() + return False # No change, just renewal else: + # New membership - this is a change session.add( model.ExpiringUserGroupMembership( user_id=user_id, @@ -261,3 +264,23 @@ def get_federated_user(): last_verified=timeutils.utcnow(), ) ) + return True # New membership added + + def cleanup_stale_group_memberships( + self, user_id, idp_id, current_group_ids + ): + """Remove expiring group memberships that are no longer in the IdP assertion.""" + with sql.session_for_write() as session: + query = session.query(model.ExpiringUserGroupMembership) + query = query.filter_by(user_id=user_id, idp_id=idp_id) + existing_memberships = query.all() + + current_group_set = set(current_group_ids) + removed_group_ids = [] + + for membership in existing_memberships: + if membership.group_id not in 
current_group_set: + removed_group_ids.append(membership.group_id) + session.delete(membership) + + return removed_group_ids diff --git a/keystone/limit/schema.py b/keystone/limit/schema.py index 6bb0eeb8c5..5d51abd558 100644 --- a/keystone/limit/schema.py +++ b/keystone/limit/schema.py @@ -93,7 +93,10 @@ **parameter_types.name, }, }, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (service_id__icontains) + # support. + "additionalProperties": True, } # Response body of the `GET /registered_limits` API operation @@ -257,7 +260,10 @@ **parameter_types.domain_id, }, }, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (project_id__icontains) + # support. + "additionalProperties": True, } # Response body of the `GET /limits` API operation diff --git a/keystone/notifications.py b/keystone/notifications.py index 1df7e1569c..de66ce14db 100644 --- a/keystone/notifications.py +++ b/keystone/notifications.py @@ -24,6 +24,7 @@ import oslo_messaging from oslo_utils import reflection import pycadf +from pycadf import attachment from pycadf import cadftaxonomy as taxonomy from pycadf import cadftype from pycadf import credential @@ -33,6 +34,7 @@ from pycadf import resource from keystone.common import context +from keystone.common import password_hashing from keystone.common import provider_api from keystone.common import utils import keystone.conf @@ -71,6 +73,7 @@ 'OS-OAUTH1:request_token': taxonomy.SECURITY_CREDENTIAL, 'OS-OAUTH1:consumer': taxonomy.SECURITY_ACCOUNT, 'application_credential': taxonomy.SECURITY_CREDENTIAL, + 'credential': taxonomy.SECURITY_CREDENTIAL, } SAML_AUDIT_TYPE = 'http://docs.oasis-open.org/security/saml/v2.0' @@ -116,6 +119,13 @@ def build_audit_initiator(): if oslo_context.global_request_id: initiator.global_request_id = 
oslo_context.global_request_id + try: + token_info = flask.request.environ['keystone.token_info'] + app_cred_id = token_info['token']['application_credential']['id'] + initiator.application_credential_id = app_cred_id + except KeyError: + pass + return initiator @@ -723,7 +733,40 @@ def wrapper(wrapped_self, user_id, *args, **kwargs): if isinstance(ex, exception.AccountLocked): raise exception.Unauthorized raise - except Exception: + except Exception as ex: + audit_kwargs = {} + + if ( + isinstance(ex, AssertionError) + and kwargs.get("password") is not None + and "event" + in CONF.security_compliance.report_invalid_password_hash + ): + # Authentication failed because of invalid password, so we + # include partial pasword hash to aid bruteforce attacks + # recognition + partial_password_hash = ( + password_hashing.generate_partial_password_hash( + kwargs["password"], + # use fully qualified class name of the driver as + # personalization salt causing to produce different + # hashes for different backends even when the same + # invalid password is submitted + salt=reflection.get_class_name( + wrapped_self.driver + ), + ) + ) + attachments = audit_kwargs.get("attachments", []) + attachments.append( + attachment.Attachment( + typeURI="mime:text/plain", + content=partial_password_hash, + name="partial_password_hash", + ) + ) + audit_kwargs["attachments"] = attachments + # For authentication failure send a CADF event as well _send_audit_notification( self.action, @@ -731,6 +774,7 @@ def wrapper(wrapped_self, user_id, *args, **kwargs): taxonomy.OUTCOME_FAILURE, target, self.event_type, + **audit_kwargs, ) raise else: @@ -904,7 +948,14 @@ class _CatalogHelperObj(provider_api.ProviderAPIMixin): def _send_audit_notification( - action, initiator, outcome, target, event_type, reason=None, **kwargs + action, + initiator, + outcome, + target, + event_type, + reason=None, + attachments=None, + **kwargs, ): """Send CADF notification to inform observers about the affected 
resource. @@ -921,6 +972,7 @@ def _send_audit_notification( key-value pairs to the CADF event. :param reason: Reason for the notification which contains the response code and message description + :param attachments: Array of Attachment objects """ if _check_notification_opt_out(event_type, outcome): return @@ -951,6 +1003,10 @@ def _send_audit_notification( if service_id is not None: event.observer.id = service_id + attachments = attachments if attachments is not None else [] + for attachment_val in attachments: + event.add_attachment(attachment_val) + for key, value in kwargs.items(): setattr(event, key, value) diff --git a/keystone/oauth1/backends/sql.py b/keystone/oauth1/backends/sql.py index 03677e1392..a3b9b96339 100644 --- a/keystone/oauth1/backends/sql.py +++ b/keystone/oauth1/backends/sql.py @@ -207,7 +207,7 @@ def authorize_request_token(self, request_token_id, user_id, role_ids): token_dict['authorizing_user_id'] = user_id token_dict['verifier'] = ''.join( random.sample(base.VERIFIER_CHARS, 8) - ) + ) # nosec token_dict['role_ids'] = jsonutils.dumps(role_ids) new_token = RequestToken.from_dict(token_dict) diff --git a/keystone/resource/core.py b/keystone/resource/core.py index 0d554310c1..e473f23dcd 100644 --- a/keystone/resource/core.py +++ b/keystone/resource/core.py @@ -230,6 +230,16 @@ def create_project(self, project_id, project, initiator=None): project['name'] = project['name'].strip() project.setdefault('description', '') + # ccloud add default tag(s) + if CONF.default_tag: + default_tags = [x.strip() for x in CONF.default_tag] + if 'tags' in project: + # a user may have provided a tag, which is a default tag, again + # make unique by converting to a set + project['tags'] = list(set(project['tags'] + default_tags)) + else: + project['tags'] = default_tags + # For regular projects, the controller will ensure we have a valid # domain_id. 
For projects acting as a domain, the project_id # is, effectively, the domain_id - and for such projects we don't diff --git a/keystone/resource/schema.py b/keystone/resource/schema.py index c8a7ab3680..f19c4742c1 100644 --- a/keystone/resource/schema.py +++ b/keystone/resource/schema.py @@ -100,6 +100,9 @@ }, "limit": {"type": ["integer", "string"]}, }, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (name__icontains) support. + "additionalProperties": True, } project_schema: dict[str, Any] = { @@ -208,7 +211,9 @@ }, "limit": {"type": ["integer", "string"]}, }, - "additionalProperties": "False", + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. Doing so will remove comparator (name__icontains) support. + "additionalProperties": True, } domain_index_response_body: dict[str, Any] = { diff --git a/keystone/server/flask/application.py b/keystone/server/flask/application.py index ddde080cfa..f270dd0b0e 100644 --- a/keystone/server/flask/application.py +++ b/keystone/server/flask/application.py @@ -141,6 +141,11 @@ def _handle_keystone_exception(error): # Handle logging if isinstance(error, exception.Unauthorized): + # ccloud: log original message, regardless of insecure_debug setting + if error.message is not None: + message = error.message + else: + message = error LOG.warning( "Authorization failed. 
%(exception)s from %(remote_addr)s", {'exception': error, 'remote_addr': flask.request.remote_addr}, diff --git a/keystone/server/flask/core.py b/keystone/server/flask/core.py index 42d28468a8..afbed42468 100644 --- a/keystone/server/flask/core.py +++ b/keystone/server/flask/core.py @@ -25,11 +25,16 @@ from keystone.common import profiler import keystone.conf +from keystone import exception import keystone.server from keystone.server.flask import application from keystone.server.flask.request_processing.middleware import auth_context from keystone.server.flask.request_processing.middleware import url_normalize +# CCloud +import logging +from raven.contrib.flask import Sentry + # NOTE(morgan): Middleware Named Tuple with the following values: # * "namespace": namespace for the entry_point # * "ep": the entry-point name @@ -64,6 +69,32 @@ ), ) +# ccloud: additional ccloud specific middleware that needs to sit 'behind' the +# AuthContextMiddleware +_CC_MIDDLEWARE = ( + # CCloud: add watcher middleware + _Middleware( + namespace='watcher.middleware', + ep='watcher', + conf={ + 'service_type': 'identity', + 'config_file': '/etc/keystone/watcher.yaml', + 'include_initiator_user_id_in_metric': 'true', + 'include_target_project_id_in_metric': 'false', + }, + ), + # CCloud: add lifesaver middleware + _Middleware(namespace='lifesaver.middleware', ep='lifesaver', conf={}), + # CCloud: add rate_limit middleware + # _Middleware(namespace='rate_limit.middleware', + # ep='rate-limit', + # conf={'config_file': '/etc/keystone/ratelimit.yaml', + # 'service_type': 'identity', + # 'rate_limit_by': 'initiator_project_id', + # 'backend_host': 'keystone-sapcc-rate-limit', + # 'backend_timeout_seconds': '1'}), +) + # NOTE(morgan): ORDER HERE IS IMPORTANT! Each of these middlewares are # implemented/defined explicitly in Keystone Server. They do some level of # lifting to ensure the request is properly handled. 
It is importat to note @@ -103,6 +134,8 @@ def setup_app_middleware(app): MW = _APP_MIDDLEWARE IMW = _KEYSTONE_MIDDLEWARE + # ccloud + CMW = _CC_MIDDLEWARE # Add in optional (config-based) middleware # NOTE(morgan): Each of these may need to be in a specific location @@ -115,6 +148,18 @@ def setup_app_middleware(app): ), ) + _APP_MIDDLEWARE + # Apply ccloud Middleware. Need to sit 'behind' the AuthContextMiddleware + for mw in reversed(CMW): + # CCloud: optionally skip the watcher middleware (like in unit-tests) + if mw.ep == 'watcher' and os.environ.get('WATCHER_DISABLED', None): + continue + + loaded = stevedore.DriverManager( + mw.namespace, mw.ep, invoke_on_load=False + ) + factory_func = loaded.driver.factory({}, **mw.conf) + app.wsgi_app = factory_func(app.wsgi_app) + # Apply internal-only Middleware (e.g. AuthContextMiddleware). These # are below all externally loaded middleware in request processing. for mw in reversed(IMW): @@ -145,6 +190,36 @@ def setup_app_middleware(app): # Apply werkzeug specific middleware app.wsgi_app = proxy_fix.ProxyFix(app.wsgi_app) + + # CCloud + if os.environ.get('SENTRY_DSN', None): + processors = ( + 'raven.processors.SanitizePasswordsProcessor', + 'raven.processors.SanitizeKeysProcessor', + 'raven.processors.RemovePostDataProcessor', + ) + sanitize_keys = [ + 'old_password', + 'new_password', + 'password', + 'cred', + 'secret', + 'passwd', + 'credentials', + ] + app.config['SENTRY_CONFIG'] = { + 'ignore_exceptions': [ + exception.NotFound, + exception.Unauthorized, + 'INVALID_CREDENTIALS', + ], + 'processors': processors, + 'sanitize_keys': sanitize_keys, + } + + sentry = Sentry() + sentry.init_app(app, logging=True, level=logging.ERROR) + return app diff --git a/keystone/server/flask/request_processing/middleware/auth_context.py b/keystone/server/flask/request_processing/middleware/auth_context.py index c9938668e9..55728852c2 100644 --- a/keystone/server/flask/request_processing/middleware/auth_context.py +++ 
b/keystone/server/flask/request_processing/middleware/auth_context.py @@ -317,7 +317,8 @@ def _validate_trusted_issuer(self, request): 'Cannot find client issuer in env by the ' 'issuer attribute - %s.' ) - LOG.info(msg, CONF.tokenless_auth.issuer_attribute) + # ccloud: this is only relevant for debug sessions + LOG.debug(msg, CONF.tokenless_auth.issuer_attribute) return False if issuer in CONF.tokenless_auth.trusted_issuer: diff --git a/keystone/tests/unit/common/test_notifications.py b/keystone/tests/unit/common/test_notifications.py index 65fa13b1b1..ac932648db 100644 --- a/keystone/tests/unit/common/test_notifications.py +++ b/keystone/tests/unit/common/test_notifications.py @@ -25,6 +25,7 @@ from oslo_utils import timeutils from pycadf import cadftaxonomy from pycadf import cadftype +from pycadf import event as cadfevent from pycadf import eventfactory from pycadf import resource as cadfresource @@ -144,6 +145,8 @@ class NotificationsTestCase(unit.BaseTestCase): def setUp(self): super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) + # ccloud: fix random notification test failures + oslo_messaging.get_notification_transport(CONF, url='rabbit://') self.config_fixture.config( group='oslo_messaging_notifications', transport_url='rabbit://' ) @@ -299,6 +302,7 @@ def fake_audit( target, event_type, reason=None, + attachments=None, **kwargs, ): service_security = cadftaxonomy.SERVICE_SECURITY @@ -313,6 +317,10 @@ def fake_audit( observer=cadfresource.Resource(typeURI=service_security), ) + attachments = attachments if attachments is not None else [] + for attachment_val in attachments: + event.add_attachment(attachment_val) + for key, value in kwargs.items(): setattr(event, key, value) @@ -356,7 +364,13 @@ def _assert_last_note( self.assertEqual(actor_operation, note['actor_operation']) def _assert_last_audit( - self, resource_id, operation, resource_type, target_uri, reason=None + self, + resource_id, + operation, + resource_type, + 
target_uri, + reason=None, + attachments=None, ): # NOTE(stevemar): If 'cadf' format is not used, then simply # return since this assertion is not valid. @@ -384,6 +398,15 @@ def _assert_last_audit( reason['reasonType'], payload['reason']['reasonType'] ) self.assertTrue(audit['send_notification_called']) + if attachments is None: + self.assertNotIn(cadfevent.EVENT_KEYNAME_ATTACHMENTS, payload) + else: + self.assertIn(cadfevent.EVENT_KEYNAME_ATTACHMENTS, payload) + for attachment_val in attachments: + self.assertIn( + attachment_val, + payload.get(cadfevent.EVENT_KEYNAME_ATTACHMENTS), + ) def _assert_initiator_data_is_set(self, operation, resource_type, typeURI): self.assertGreater(len(self._audits), 0) @@ -1142,6 +1165,131 @@ def test_changing_password_too_early_sends_notification(self): reason=expected_reason, ) + def test_valid_password_not_hashed_by_default(self): + password = uuid.uuid4().hex + user_ref = unit.new_user_ref( + domain_id=self.domain_id, password=password + ) + user_ref = PROVIDERS.identity_api.create_user(user_ref) + + with mock.patch( + 'keystone.common.password_hashing.generate_partial_password_hash' + ) as mocked: + with self.make_request(): + PROVIDERS.identity_api.authenticate(user_ref['id'], password) + mocked.assert_not_called() + + self._assert_last_audit( + None, + 'authenticate', + None, + cadftaxonomy.ACCOUNT_USER, + attachments=None, + ) + + def test_invalid_password_not_hashed_by_default(self): + password = uuid.uuid4().hex + invalid_password = 'invalid_' + password + user_ref = unit.new_user_ref( + domain_id=self.domain_id, password=password + ) + user_ref = PROVIDERS.identity_api.create_user(user_ref) + + with mock.patch( + 'keystone.common.password_hashing.generate_partial_password_hash' + ) as mocked: + with self.make_request(): + self.assertRaises( + AssertionError, + PROVIDERS.identity_api.authenticate, + user_id=user_ref['id'], + password=invalid_password, + ) + mocked.assert_not_called() + + self._assert_last_audit( + None, + 
'authenticate', + None, + cadftaxonomy.ACCOUNT_USER, + attachments=None, + ) + + def test_valid_password_hash_not_included_when_report_in_event(self): + password = uuid.uuid4().hex + user_ref = unit.new_user_ref( + domain_id=self.domain_id, password=password + ) + user_ref = PROVIDERS.identity_api.create_user(user_ref) + + conf = self.useFixture(config_fixture.Config(CONF)) + conf.config( + group='security_compliance', report_invalid_password_hash='event' + ) + conf.config( + group='security_compliance', + invalid_password_hash_secret_key='secret_key', + ) + with mock.patch( + 'keystone.common.password_hashing.generate_partial_password_hash' + ) as mocked: + with self.make_request(): + PROVIDERS.identity_api.authenticate(user_ref['id'], password) + mocked.assert_not_called() + + self._assert_last_audit( + None, + 'authenticate', + None, + cadftaxonomy.ACCOUNT_USER, + attachments=None, + ) + + def test_invalid_password_hash_included_when_report_in_event(self): + password = uuid.uuid4().hex + invalid_password = 'invalid_' + password + user_ref = unit.new_user_ref( + domain_id=self.domain_id, password=password + ) + user_ref = PROVIDERS.identity_api.create_user(user_ref) + + conf = self.useFixture(config_fixture.Config(CONF)) + conf.config( + group='security_compliance', report_invalid_password_hash='event' + ) + conf.config( + group='security_compliance', + invalid_password_hash_secret_key='secret_key', + ) + hashed_invalid_password = 'hashed_' + invalid_password + with mock.patch( + 'keystone.common.password_hashing.generate_partial_password_hash', + return_value=hashed_invalid_password, + ) as mocked: + with self.make_request(): + self.assertRaises( + AssertionError, + PROVIDERS.identity_api.authenticate, + user_id=user_ref['id'], + password=invalid_password, + ) + + mocked.assert_called_once() + + self._assert_last_audit( + None, + 'authenticate', + None, + cadftaxonomy.ACCOUNT_USER, + attachments=[ + { + 'content': hashed_invalid_password, + 'name': 
"partial_password_hash", + 'typeURI': "mime:text/plain", + } + ], + ) + class CADFNotificationsForEntities(NotificationsForEntities): def setUp(self): diff --git a/keystone/tests/unit/common/test_password_hashing.py b/keystone/tests/unit/common/test_password_hashing.py index 2c242f910c..584b176907 100644 --- a/keystone/tests/unit/common/test_password_hashing.py +++ b/keystone/tests/unit/common/test_password_hashing.py @@ -20,6 +20,8 @@ import keystone.conf from keystone.tests import unit +import passlib.hash + CONF = keystone.conf.CONF @@ -90,3 +92,181 @@ def test_pbkdf2_sha512(self): ) hashed = password_hashing.hash_password(password) self.assertTrue(password_hashing.check_password(password, hashed)) + + def test_sha512_crypt_passlib_compat(self): + self.config_fixture.config(strict_password_check=True) + # sha512_crypt is deprecated and is not supported to be set. + # We want to ensure that user is still able to login, thus + # set algo to whatever and go verify pwd + self.config_fixture.config( + group="identity", password_hash_algorithm="bcrypt" + ) + self.config_fixture.config(group="identity", max_password_length="72") + # few iterations to test multiple random values + for _ in range(self.ITERATIONS): + password: str = "".join( # type: ignore + secrets.choice(string.printable) + for i in range(random.randint(1, 72)) + ) + hashed_passlib = passlib.hash.sha512_crypt.hash(password) + self.assertTrue( + password_hashing.check_password(password, hashed_passlib) + ) + + +class TestGeneratePartialPasswordHash(unit.BaseTestCase): + def setUp(self): + super().setUp() + + self.h = password_hashing.generate_partial_password_hash + + self.config_fixture = self.useFixture(config_fixture.Config(CONF)) + self.config_fixture.config( + group="security_compliance", + invalid_password_hash_secret_key="secret_key", + ) + + def test_equal_input_generates_equal_hash(self): + args1 = ("password", "salt") + args2 = ("password", "salt") + + hashed1 = self.h(*args1) + hashed2 = 
self.h(*args2) + + self.assertEqual(hashed1, hashed2) + + def test_different_inputs_generate_different_hashes(self): + params = [ + { + "msg": "Different passwords", + "args1": ("password1", "salt"), + "args2": ("password2", "salt"), + }, + { + "msg": "Different salts", + "args1": ("password", "salt1"), + "args2": ("password", "salt2"), + }, + { + "msg": "Same but mixed inputs", + "args1": ("password", "salt"), + "args2": ("salt", "password"), + }, + ] + + # test variability of: a) full hash; b) partial hash + max_chars_conf = [None, 5] + for data in params: + for max_chars in max_chars_conf: + msg = data["msg"] + f" ({max_chars=})" + self.config_fixture.config( + group="security_compliance", + invalid_password_hash_max_chars=max_chars, + ) + with self.subTest(msg=msg): + hashed1 = self.h(*data["args1"]) + hashed2 = self.h(*data["args2"]) + self.assertNotEqual(hashed1, hashed2, msg=msg) + + def test_generates_full_hash_by_default(self): + args1 = ("password", "salt") + args2 = ("password", "salt") + + # when `max_chars` is not specified, a default value will be used, + # which is expected to return full hash + hashed1 = self.h(*args1) + + self.config_fixture.config( + group="security_compliance", + # 1000 is larger than any supported hash function key length + + # base64 encoding, so it should cause full hash to be returned + invalid_password_hash_max_chars=1000, + ) + hashed2 = self.h(*args2) + + self.assertEqual(len(hashed1), len(hashed2)) + + def test_invalid_function_raises_value_error(self): + args = ("password", "salt") + self.config_fixture.config( + group="security_compliance", + invalid_password_hash_function="invalid", + ) + + self.assertRaises(ValueError, self.h, *args) + + def test_large_passwords(self): + # 48042 bytes or 48 kB + args1 = ("p" * 1000 * 48 + "1", "salt") + args2 = ("p" * 1000 * 48 + "2", "salt") + + # test variability of: a) full hash; b) partial hash + max_chars_conf = [None, 5] + for max_chars in max_chars_conf: + msg = f"Large 
password ({max_chars=})" + self.config_fixture.config( + group="security_compliance", + invalid_password_hash_max_chars=max_chars, + ) + with self.subTest(msg=msg): + hashed1 = self.h(*args1) + hashed2 = self.h(*args2) + self.assertNotEqual(hashed1, hashed2, msg=msg) + + def test_different_conf_generates_different_hashes(self): + args = ("password", "salt") + + params = [ + { + "msg": "Different secret_keys", + "conf1": {"invalid_password_hash_secret_key": "secret_key1"}, + "conf2": {"invalid_password_hash_secret_key": "secret_key2"}, + }, + { + "msg": "Different max chars", + "conf1": {"invalid_password_hash_max_chars": 3}, + "conf2": {"invalid_password_hash_max_chars": 4}, + }, + { + "msg": "Different hash functions", + "conf1": {"invalid_password_hash_function": "sha3_512"}, + "conf2": {"invalid_password_hash_function": "sha512"}, + }, + ] + + for data in params: + with self.subTest(msg=data["msg"]): + self.config_fixture.config( + group="security_compliance", **data["conf1"] + ) + hashed1 = self.h(*args) + self.config_fixture.config( + group="security_compliance", **data["conf2"] + ) + hashed2 = self.h(*args) + self.assertNotEqual(hashed1, hashed2, msg=data["msg"]) + + def test_conf_secret_key_is_required(self): + self.config_fixture.config( + group="security_compliance", invalid_password_hash_secret_key=None + ) + + args = ("password", "salt") + + self.assertRaises(RuntimeError, self.h, *args) + + def test_returns_not_more_than_max_chars(self): + args = ("password", "salt") + + max_chars_conf = [1, 5, 1000] + for max_chars in max_chars_conf: + self.config_fixture.config( + group="security_compliance", + invalid_password_hash_max_chars=max_chars, + ) + with self.subTest(msg=max_chars): + hashed = self.h(*args) + + self.assertLessEqual( + len(hashed), max_chars, msg=f"{max_chars=}" + ) diff --git a/keystone/tests/unit/core.py b/keystone/tests/unit/core.py index e7ceb49c5d..5d2c4a1b96 100644 --- a/keystone/tests/unit/core.py +++ b/keystone/tests/unit/core.py @@ 
-304,6 +304,12 @@ def new_project_ref(domain_id=None, is_domain=False, **kwargs): return ref +def new_project_without_tags_ref(domain_id=None, is_domain=False, **kwargs): + ref = new_project_ref(domain_id=domain_id, is_domain=is_domain, **kwargs) + ref.pop('tags') + return ref + + def new_user_ref(domain_id, project_id=None, **kwargs): ref = { 'id': uuid.uuid4().hex, @@ -925,6 +931,8 @@ def config_overrides(self): # allowed in the `[identity] password_hash_rounds` setting self.config_fixture.config(group='identity', password_hash_rounds=4) + self.config_fixture.config(group='api', response_validation='error') + self.useFixture( ksfixtures.KeyRepository( self.config_fixture, diff --git a/keystone/tests/unit/identity/backends/test_ldap_common.py b/keystone/tests/unit/identity/backends/test_ldap_common.py index a2906c4cb2..a722de34c4 100644 --- a/keystone/tests/unit/identity/backends/test_ldap_common.py +++ b/keystone/tests/unit/identity/backends/test_ldap_common.py @@ -12,23 +12,20 @@ import os import tempfile -from unittest import mock import uuid +from unittest import mock import fixtures import ldap.dn from oslo_config import fixture as config_fixture -from keystone.common import driver_hints -from keystone.common import provider_api import keystone.conf from keystone import exception as ks_exception +from keystone.common import driver_hints, provider_api from keystone.identity.backends.ldap import common as common_ldap from keystone.tests import unit -from keystone.tests.unit import default_fixtures -from keystone.tests.unit import fakeldap -from keystone.tests.unit.ksfixtures import database -from keystone.tests.unit.ksfixtures import ldapdb +from keystone.tests.unit import default_fixtures, fakeldap +from keystone.tests.unit.ksfixtures import database, ldapdb CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs @@ -694,3 +691,97 @@ def test_search_s_sizelimit_exceeded(self, mock_search_s): 'dc=example,dc=test', ldap.SCOPE_SUBTREE, ) + + +class 
LDAPPaginationTest(unit.TestCase): + """Test LDAP pagination with more than page_size results. + + This test verifies the fix for issue #434 where Keystone could not list + more than 1000 users from AD without filters. The issue was that + _ldap_get_limited only fetched one page of results. + + Similar to Nova bug #2122109, we verify that all results are fetched + when pagination is enabled, not just the first page. + """ + + def setUp(self): + super().setUp() + + self.useFixture(ldapdb.LDAPDatabase()) + self.useFixture(database.Database()) + + self.load_backends() + self.load_fixtures(default_fixtures) + + def config_overrides(self): + super().config_overrides() + self.config_fixture.config(group='identity', driver='ldap') + # Set page_size to simulate pagination + self.config_fixture.config(group='ldap', page_size=100) + + def config_files(self): + config_files = super().config_files() + config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) + return config_files + + def test_list_users_returns_all_entries_with_multiple_pages(self): + """Verify that listing users returns ALL results across multiple pages.""" + user_count = 250 + created_user_ids = [] + + for i in range(user_count): + user_ref = { + 'name': f'test_user_{i:04d}', + 'enabled': True, + 'domain_id': 'default', + } + user = PROVIDERS.identity_api.create_user(user_ref) + created_user_ids.append(user['id']) + + hints = driver_hints.Hints() + users = PROVIDERS.identity_api.list_users(hints=hints) + returned_user_ids = [u['id'] for u in users] + + for user_id in created_user_ids: + self.assertIn( + user_id, + returned_user_ids, + f"User {user_id} not found in returned users. " + f"Only {len(returned_user_ids)} users returned, expected at least {user_count}" + ) + + self.assertGreater( + len(users), + 100, + f"Expected more than 100 users (page_size), but got {len(users)}. " + "This suggests pagination is not working correctly." 
+ ) + + def test_list_users_with_limit_hint_returns_all_pages(self): + """Verify that even with a limit hint, all pages are fetched.""" + user_count = 150 + created_user_ids = [] + + for i in range(user_count): + user_ref = { + 'name': f'limit_test_user_{i:04d}', + 'enabled': True, + 'domain_id': 'default', + } + user = PROVIDERS.identity_api.create_user(user_ref) + created_user_ids.append(user['id']) + + hints = driver_hints.Hints() + hints.set_limit(200) + + users = PROVIDERS.identity_api.list_users(hints=hints) + returned_user_ids = [u['id'] for u in users] + + found_count = sum(1 for uid in created_user_ids if uid in returned_user_ids) + + self.assertGreater( + found_count, + 100, + f"Expected more than 100 of our users (page_size), but only found {found_count}. " + "This suggests only one page was fetched." + ) diff --git a/keystone/tests/unit/identity/test_backend_sql.py b/keystone/tests/unit/identity/test_backend_sql.py index a811e6a32b..89067cf27b 100644 --- a/keystone/tests/unit/identity/test_backend_sql.py +++ b/keystone/tests/unit/identity/test_backend_sql.py @@ -15,6 +15,7 @@ import freezegun from oslo_utils import timeutils +import passlib.hash from keystone.common import password_hashers from keystone.common import password_hashing @@ -72,10 +73,31 @@ def config_overrides(self): ) def test_configured_algorithm_used(self): + user_ref = self._get_user(self.user_foo['id']) + self.assertEqual( + password_hashers.scrypt.Scrypt, + password_hashing._get_hasher_from_ident(user_ref.password), + ) + + def _get_user(self, user_id): with sql.session_for_read() as session: - user_ref = PROVIDERS.identity_api._get_user( - session, self.user_foo['id'] + user_ref = PROVIDERS.identity_api._get_user(session, user_id) + return user_ref + + def test_deprecated_password_hash_is_updated_on_auth(self): + user_ref = self._get_user(self.user_foo['id']) + with sql.session_for_write() as session: + old_hash = passlib.hash.sha512_crypt.hash( + self.user_foo['password'] ) + 
user_ref.password_ref.password_hash = old_hash + session.add(user_ref.password_ref) + + PROVIDERS.identity_api.driver.authenticate( + self.user_foo['id'], self.user_foo['password'] + ) + user_ref = self._get_user(self.user_foo['id']) + self.assertEqual( password_hashers.scrypt.Scrypt, password_hashing._get_hasher_from_ident(user_ref.password), diff --git a/keystone/tests/unit/test_backend_ldap.py b/keystone/tests/unit/test_backend_ldap.py index 946fa18100..359284a0f3 100644 --- a/keystone/tests/unit/test_backend_ldap.py +++ b/keystone/tests/unit/test_backend_ldap.py @@ -21,7 +21,7 @@ import fixtures import ldap from oslo_log import versionutils -import pkg_resources +import pkg_resources # type: ignore from testtools import matchers from keystone.common import cache diff --git a/keystone/tests/unit/test_cli.py b/keystone/tests/unit/test_cli.py index 9949439fe5..4fb64642d6 100644 --- a/keystone/tests/unit/test_cli.py +++ b/keystone/tests/unit/test_cli.py @@ -1619,8 +1619,9 @@ def test_password_regular_expression_description_deactivated(self): class TokensDoctorTests(unit.TestCase): def test_unreasonable_max_token_size_raised(self): # Symptom Detected: the max_token_size for fernet is greater than 255 + # ccloud: not 255, but 268 self.config_fixture.config(group='token', provider='fernet') - self.config_fixture.config(max_token_size=256) + self.config_fixture.config(max_token_size=269) self.assertTrue(tokens.symptom_unreasonable_max_token_size()) def test_unreasonable_max_token_size_not_raised(self): @@ -1630,8 +1631,9 @@ def test_unreasonable_max_token_size_not_raised(self): self.assertFalse(tokens.symptom_unreasonable_max_token_size()) # No Symptom Detected: the max_token_size for fernet is 255 or less + # ccloud: not 255, but 268 self.config_fixture.config(group='token', provider='fernet') - self.config_fixture.config(max_token_size=255) + self.config_fixture.config(max_token_size=268) self.assertFalse(tokens.symptom_unreasonable_max_token_size()) diff --git 
a/keystone/tests/unit/test_contrib_ec2_core.py b/keystone/tests/unit/test_contrib_ec2_core.py index b23bdbc896..534cf894ea 100644 --- a/keystone/tests/unit/test_contrib_ec2_core.py +++ b/keystone/tests/unit/test_contrib_ec2_core.py @@ -62,20 +62,34 @@ def _test_valid_authentication_response_with_proper_secret(self, **kwargs): }, } credentials['signature'] = signer.generate(credentials) + # Authenticate as system admin by default unless overridden via kwargs + token = None + if 'noauth' in kwargs and kwargs['noauth']: + token = None + else: + PROVIDERS.assignment_api.create_system_grant_for_user( + self.user_id, self.role_id + ) + token = self.get_system_scoped_token() + + expected_status = kwargs.get('expected_status', http.client.OK) resp = self.post( '/ec2tokens', body={'credentials': credentials}, - expected_status=http.client.OK, - **kwargs, + expected_status=expected_status, + token=token, + noauth=kwargs.get('noauth'), ) - self.assertValidProjectScopedTokenResponse(resp, self.user) + if expected_status == http.client.OK: + self.assertValidProjectScopedTokenResponse(resp, self.user) def test_valid_authentication_response_with_proper_secret(self): self._test_valid_authentication_response_with_proper_secret() def test_valid_authentication_response_with_proper_secret_noauth(self): + # ec2 endpoint now enforces RBAC; unauthenticated should be denied self._test_valid_authentication_response_with_proper_secret( - noauth=True + expected_status=http.client.UNAUTHORIZED, noauth=True ) def test_valid_authentication_response_with_signature_v4(self): diff --git a/keystone/tests/unit/test_contrib_s3_core.py b/keystone/tests/unit/test_contrib_s3_core.py index 60637f19d7..5b431fa4d8 100644 --- a/keystone/tests/unit/test_contrib_s3_core.py +++ b/keystone/tests/unit/test_contrib_s3_core.py @@ -55,7 +55,7 @@ def test_http_get_method_not_allowed(self): ) self.assertEqual(http.client.METHOD_NOT_ALLOWED, resp.status_code) - def _test_good_response(self, **kwargs): + def 
_test_good_response(self, expected_status=http.client.OK, **kwargs): sts = 'string to sign' # opaque string from swift3 sig = hmac.new( self.cred_blob['secret'].encode('ascii'), @@ -71,18 +71,22 @@ def _test_good_response(self, **kwargs): 'token': base64.b64encode(sts.encode('ascii')).strip(), } }, - expected_status=http.client.OK, + expected_status=expected_status, **kwargs, ) - self.assertValidProjectScopedTokenResponse( - resp, self.user, forbid_token_id=True - ) + if expected_status == http.client.OK: + self.assertValidProjectScopedTokenResponse( + resp, self.user, forbid_token_id=True + ) + else: + self.assertValidErrorResponse(resp) def test_good_response(self): self._test_good_response() def test_good_response_noauth(self): - self._test_good_response(noauth=True) + # s3tokens now requires service/admin auth; unauthenticated should be denied + self._test_good_response(http.client.UNAUTHORIZED, noauth=True) def test_bad_request(self): self.post( diff --git a/keystone/tests/unit/test_policy.py b/keystone/tests/unit/test_policy.py index eaff961ff4..14f04fb1a2 100644 --- a/keystone/tests/unit/test_policy.py +++ b/keystone/tests/unit/test_policy.py @@ -242,6 +242,7 @@ def test_all_targets_documented(self): 'service_role', 'token_subject', 'domain_managed_target_role', + 'cloud_admin', ] def read_doc_targets(): diff --git a/keystone/tests/unit/test_user_group_cache_invalidation.py b/keystone/tests/unit/test_user_group_cache_invalidation.py new file mode 100644 index 0000000000..9ee95fe6c6 --- /dev/null +++ b/keystone/tests/unit/test_user_group_cache_invalidation.py @@ -0,0 +1,221 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock +import uuid + +from keystone.common import provider_api +from keystone.tests import unit +from keystone.tests.unit import default_fixtures +from keystone.tests.unit.ksfixtures import database + +PROVIDERS = provider_api.ProviderAPIs + + +class FederatedGroupCacheTest(unit.TestCase): + """Test federated group membership cache invalidation.""" + + def setUp(self): + # Mock LDAP to avoid environment-specific errors + ldap_patcher = mock.patch('ldap.initialize') + self.addCleanup(ldap_patcher.stop) + ldap_patcher.start() + + # Mock LDAP options that are checked during setup + with mock.patch('ldap.get_option', return_value=None): + with mock.patch('ldap.set_option'): + super().setUp() + self.useFixture(database.Database()) + self.load_backends() + + # Create domain + PROVIDERS.resource_api.create_domain( + default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN + ) + + # Create IDP, mapping, and protocol + self.idp = { + 'id': uuid.uuid4().hex, + 'enabled': True, + 'description': uuid.uuid4().hex, + } + self.mapping = {'id': uuid.uuid4().hex} + self.protocol = { + 'id': uuid.uuid4().hex, + 'idp_id': self.idp['id'], + 'mapping_id': self.mapping['id'], + } + + PROVIDERS.federation_api.create_idp(self.idp['id'], self.idp) + PROVIDERS.federation_api.create_mapping( + self.mapping['id'], self.mapping + ) + PROVIDERS.federation_api.create_protocol( + self.idp['id'], self.protocol['id'], self.protocol + ) + + self.domain_id = PROVIDERS.federation_api.get_idp(self.idp['id'])[ + 'domain_id' + ] + + # Create a federated user + 
self.federated_user = { + 'idp_id': self.idp['id'], + 'protocol_id': self.protocol['id'], + 'unique_id': uuid.uuid4().hex, + 'display_name': uuid.uuid4().hex, + } + self.user = PROVIDERS.shadow_users_api.create_federated_user( + self.domain_id, self.federated_user + ) + + # Create groups + self.group1 = unit.new_group_ref(domain_id=self.domain_id) + self.group1 = PROVIDERS.identity_api.create_group(self.group1) + + self.group2 = unit.new_group_ref(domain_id=self.domain_id) + self.group2 = PROVIDERS.identity_api.create_group(self.group2) + + def test_group_membership_returns_true_for_new_membership(self): + """Test that adding a new group membership returns True.""" + result = PROVIDERS.shadow_users_api.add_user_to_group_expires( + self.user['id'], self.group1['id'] + ) + self.assertTrue(result) + + def test_group_membership_returns_false_for_renewal(self): + """Test that renewing an existing group membership returns False.""" + PROVIDERS.shadow_users_api.add_user_to_group_expires( + self.user['id'], self.group1['id'] + ) + result = PROVIDERS.shadow_users_api.add_user_to_group_expires( + self.user['id'], self.group1['id'] + ) + self.assertFalse(result) + + def test_new_group_membership_triggers_token_revocation(self): + """Test that new group membership triggers token revocation.""" + with mock.patch.object( + PROVIDERS.assignment_api, 'invalidate_user_cache_on_group_change' + ) as mock_revoke: + user_dict = { + 'id': self.federated_user['unique_id'], + 'name': self.federated_user['display_name'], + 'domain': {'id': self.domain_id}, + } + + PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group1['id']], + ) + + mock_revoke.assert_called_once_with(self.user['id']) + + def test_membership_renewal_does_not_trigger_revocation(self): + """Test that pure membership renewal doesn't trigger token revocation.""" + with mock.patch.object( + PROVIDERS.shadow_users_api, + 'add_user_to_group_expires', + 
return_value=False, + ): + with mock.patch.object( + PROVIDERS.shadow_users_api, + 'cleanup_stale_group_memberships', + return_value=False, + ): + with mock.patch.object( + PROVIDERS.assignment_api, + 'invalidate_user_cache_on_group_change', + ) as mock_revoke: + user_dict = { + 'id': self.federated_user['unique_id'], + 'name': self.federated_user['display_name'], + 'domain': {'id': self.domain_id}, + } + + PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group1['id']], + ) + + mock_revoke.assert_not_called() + + def test_adding_additional_group_triggers_revocation(self): + """Test that adding an additional group triggers token revocation.""" + PROVIDERS.shadow_users_api.add_user_to_group_expires( + self.user['id'], self.group1['id'] + ) + + with mock.patch.object( + PROVIDERS.assignment_api, 'invalidate_user_cache_on_group_change' + ) as mock_revoke: + user_dict = { + 'id': self.federated_user['unique_id'], + 'name': self.federated_user['display_name'], + 'domain': {'id': self.domain_id}, + } + + PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group1['id'], self.group2['id']], + ) + + mock_revoke.assert_called_once_with(self.user['id']) + + def test_empty_group_list_does_not_trigger_revocation(self): + """Test that shadowing with no groups doesn't trigger revocation.""" + with mock.patch.object( + PROVIDERS.assignment_api, 'invalidate_user_cache_on_group_change' + ) as mock_revoke: + user_dict = { + 'id': self.federated_user['unique_id'], + 'name': self.federated_user['display_name'], + 'domain': {'id': self.domain_id}, + } + + PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], self.protocol['id'], user_dict, group_ids=[] + ) + + mock_revoke.assert_not_called() + + @mock.patch('keystone.notifications.invalidate_token_cache_notification') + def test_token_revocation_persists_event_and_invalidates_cache( + self, 
mock_cache_notification + ): + """Test that token revocation creates revoke event and invalidates cache.""" + with mock.patch.object(PROVIDERS.revoke_api, 'revoke') as mock_revoke: + PROVIDERS.assignment_api.invalidate_user_cache_on_group_change( + self.user['id'] + ) + + # Verify revoke was called with a RevokeEvent + mock_revoke.assert_called_once() + revoke_event = mock_revoke.call_args[0][0] + + # Check that the revoke event has the correct user_id + self.assertEqual(revoke_event.user_id, self.user['id']) + + # Check that issued_before is set (indicating -1 second logic) + self.assertIsNotNone(revoke_event.issued_before) + + # Verify cache invalidation notification was called + mock_cache_notification.assert_called_once() + call_args = mock_cache_notification.call_args[0][0] + self.assertIn(self.user['id'], call_args) + self.assertIn('group membership changed', call_args) diff --git a/keystone/tests/unit/test_user_group_cache_invalidation_integration.py b/keystone/tests/unit/test_user_group_cache_invalidation_integration.py new file mode 100644 index 0000000000..d725a7a482 --- /dev/null +++ b/keystone/tests/unit/test_user_group_cache_invalidation_integration.py @@ -0,0 +1,348 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Integration tests for federated group membership cache invalidation.""" + +from unittest import mock +import uuid + +from keystone.common import provider_api +from keystone.tests import unit +from keystone.tests.unit import default_fixtures +from keystone.tests.unit.ksfixtures import database + +PROVIDERS = provider_api.ProviderAPIs + + +class FederatedGroupCacheIntegrationTest(unit.TestCase): + """Integration test for federated group membership cache invalidation.""" + + def setUp(self): + # Mock LDAP to avoid environment-specific errors + ldap_patcher = mock.patch('ldap.initialize') + self.addCleanup(ldap_patcher.stop) + ldap_patcher.start() + + # Mock LDAP options that are checked during setup + with mock.patch('ldap.get_option', return_value=None): + with mock.patch('ldap.set_option'): + super().setUp() + + self.useFixture(database.Database()) + self.load_backends() + + # Create domain + PROVIDERS.resource_api.create_domain( + default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN + ) + + # Create IDP, mapping, and protocol + self.idp = { + 'id': uuid.uuid4().hex, + 'enabled': True, + 'description': 'Test Identity Provider', + } + self.mapping = {'id': uuid.uuid4().hex, 'rules': []} + self.protocol = { + 'id': uuid.uuid4().hex, + 'idp_id': self.idp['id'], + 'mapping_id': self.mapping['id'], + } + + PROVIDERS.federation_api.create_idp(self.idp['id'], self.idp) + PROVIDERS.federation_api.create_mapping( + self.mapping['id'], self.mapping + ) + PROVIDERS.federation_api.create_protocol( + self.idp['id'], self.protocol['id'], self.protocol + ) + + self.domain_id = PROVIDERS.federation_api.get_idp(self.idp['id'])[ + 'domain_id' + ] + + # Create groups + self.group1 = unit.new_group_ref( + domain_id=self.domain_id, name='group1' + ) + self.group1 = PROVIDERS.identity_api.create_group(self.group1) + + self.group2 = unit.new_group_ref( + domain_id=self.domain_id, name='group2' + ) + self.group2 = PROVIDERS.identity_api.create_group(self.group2) + + 
self.group3 = unit.new_group_ref( + domain_id=self.domain_id, name='group3' + ) + self.group3 = PROVIDERS.identity_api.create_group(self.group3) + + def test_complete_user_shadowing_workflow(self): + """Test complete workflow of user shadowing with cache invalidation.""" + federated_user = { + 'idp_id': self.idp['id'], + 'protocol_id': self.protocol['id'], + 'unique_id': uuid.uuid4().hex, + 'display_name': 'Test User', + } + + user_dict = { + 'id': federated_user['unique_id'], + 'name': federated_user['display_name'], + 'domain': {'id': self.domain_id}, + } + + with mock.patch.object( + PROVIDERS.assignment_api, 'invalidate_user_cache_on_group_change' + ) as mock_revoke: + user = PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group1['id']], + ) + + self.assertEqual(0, mock_revoke.call_count) + mock_revoke.reset_mock() + + user = PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group1['id'], self.group2['id']], + ) + + self.assertGreaterEqual(mock_revoke.call_count, 1) + mock_revoke.reset_mock() + + user = PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group2['id'], self.group3['id']], + ) + + self.assertEqual(1, mock_revoke.call_count) + + def test_multiple_users_concurrent_operations(self): + """Test cache invalidation with multiple concurrent user operations.""" + users = [] + for i in range(3): + fed_user = { + 'idp_id': self.idp['id'], + 'protocol_id': self.protocol['id'], + 'unique_id': uuid.uuid4().hex, + 'display_name': f'User {i}', + } + user_dict = { + 'id': fed_user['unique_id'], + 'name': fed_user['display_name'], + 'domain': {'id': self.domain_id}, + } + users.append((fed_user, user_dict)) + + invalidation_counts = {} + + original_revoke = ( + PROVIDERS.assignment_api.invalidate_user_cache_on_group_change + ) + + def track_revocation(user_id): + 
invalidation_counts[user_id] = ( + invalidation_counts.get(user_id, 0) + 1 + ) + return original_revoke(user_id) + + with mock.patch.object( + PROVIDERS.assignment_api, + 'invalidate_user_cache_on_group_change', + side_effect=track_revocation, + ): + shadowed_users = [] + for fed_user, user_dict in users: + user = PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group1['id']], + ) + shadowed_users.append(user) + + self.assertEqual(0, len(invalidation_counts)) + + PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + users[1][1], + group_ids=[self.group1['id'], self.group2['id']], + ) + + self.assertEqual(1, len(invalidation_counts)) + self.assertEqual(1, invalidation_counts[shadowed_users[1]['id']]) + + def test_expired_membership_removal_triggers_revocation(self): + """Test that expired membership removal triggers cache invalidation.""" + federated_user = { + 'idp_id': self.idp['id'], + 'protocol_id': self.protocol['id'], + 'unique_id': uuid.uuid4().hex, + 'display_name': 'Expiry Test User', + } + + user_dict = { + 'id': federated_user['unique_id'], + 'name': federated_user['display_name'], + 'domain': {'id': self.domain_id}, + } + + user = PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group1['id']], + ) + + with mock.patch.object( + PROVIDERS.shadow_users_api, + 'cleanup_stale_group_memberships', + return_value=True, + ): + with mock.patch.object( + PROVIDERS.assignment_api, + 'invalidate_user_cache_on_group_change', + ) as mock_revoke: + PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group1['id']], + ) + + mock_revoke.assert_called_once_with(user['id']) + + def test_role_assignments_reflect_group_changes(self): + """Test that role assignments reflect group membership changes.""" + project1 = 
unit.new_project_ref(domain_id=self.domain_id) + project1 = PROVIDERS.resource_api.create_project( + project1['id'], project1 + ) + + project2 = unit.new_project_ref(domain_id=self.domain_id) + project2 = PROVIDERS.resource_api.create_project( + project2['id'], project2 + ) + + role_admin = unit.new_role_ref(name='admin') + role_admin = PROVIDERS.role_api.create_role( + role_admin['id'], role_admin + ) + + role_member = unit.new_role_ref(name='member') + role_member = PROVIDERS.role_api.create_role( + role_member['id'], role_member + ) + + PROVIDERS.assignment_api.create_grant( + role_admin['id'], + group_id=self.group1['id'], + project_id=project1['id'], + ) + + PROVIDERS.assignment_api.create_grant( + role_member['id'], + group_id=self.group2['id'], + project_id=project2['id'], + ) + + federated_user = { + 'idp_id': self.idp['id'], + 'protocol_id': self.protocol['id'], + 'unique_id': uuid.uuid4().hex, + 'display_name': 'Role Test User', + } + + user_dict = { + 'id': federated_user['unique_id'], + 'name': federated_user['display_name'], + 'domain': {'id': self.domain_id}, + } + + user = PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group1['id']], + ) + + PROVIDERS.identity_api.add_user_to_group(user['id'], self.group1['id']) + + roles_proj1 = PROVIDERS.assignment_api.list_role_assignments( + user_id=user['id'], project_id=project1['id'], effective=True + ) + self.assertEqual(1, len(roles_proj1)) + self.assertEqual(role_admin['id'], roles_proj1[0]['role_id']) + + PROVIDERS.identity_api.remove_user_from_group( + user['id'], self.group1['id'] + ) + PROVIDERS.identity_api.add_user_to_group(user['id'], self.group2['id']) + PROVIDERS.assignment_api.invalidate_user_cache_on_group_change( + user['id'] + ) + + roles_proj2 = PROVIDERS.assignment_api.list_role_assignments( + user_id=user['id'], project_id=project2['id'], effective=True + ) + self.assertEqual(1, len(roles_proj2)) + 
self.assertEqual(role_member['id'], roles_proj2[0]['role_id']) + + def test_token_revocation_event_persisted(self): + """Test that revoke event is created on group change.""" + federated_user = { + 'idp_id': self.idp['id'], + 'protocol_id': self.protocol['id'], + 'unique_id': uuid.uuid4().hex, + 'display_name': 'Revocation Test User', + } + + user_dict = { + 'id': federated_user['unique_id'], + 'name': federated_user['display_name'], + 'domain': {'id': self.domain_id}, + } + + # Initial shadowing - no revocation expected + user = PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group1['id']], + ) + + # Shadow again with additional group - revocation expected + with mock.patch.object(PROVIDERS.revoke_api, 'revoke') as mock_revoke: + user = PROVIDERS.identity_api.shadow_federated_user( + self.idp['id'], + self.protocol['id'], + user_dict, + group_ids=[self.group1['id'], self.group2['id']], + ) + + # Verify revoke was called with a RevokeEvent + mock_revoke.assert_called_once() + revoke_event = mock_revoke.call_args[0][0] + + # Check that the revoke event has the correct user_id + self.assertEqual(revoke_event.user_id, user['id']) + + # Check that issued_before is set (indicating -1 second logic) + self.assertIsNotNone(revoke_event.issued_before) diff --git a/keystone/tests/unit/test_v3_application_credential.py b/keystone/tests/unit/test_v3_application_credential.py index 0ca075807c..df0c12095d 100644 --- a/keystone/tests/unit/test_v3_application_credential.py +++ b/keystone/tests/unit/test_v3_application_credential.py @@ -12,6 +12,7 @@ import datetime import http.client +import unittest import uuid from oslo_utils import timeutils @@ -742,7 +743,10 @@ def test_list_access_rules(self): ar = r.json["access_rules"] self.assertEqual(access_rules[0]["method"], ar[0]["method"]) - def test_list_access_rules_wrong_qp(self): + # TODO(stephenfin): This will pass once we increase strictness of the query + # 
string validation + @unittest.expectedFailure + def test_list_access_rules_invalid_qs(self): with self.test_client() as c: token = self.get_scoped_token() # Invoke GET access_rules with unsupported query parameters and @@ -778,6 +782,15 @@ def test_show_access_rule(self): expected_status_code=http.client.OK, headers={"X-Auth-Token": token}, ) + + # TODO(stephenfin): This will pass once we increase strictness of the query + # string validation + @unittest.expectedFailure + def test_show_access_rule_invalid_qs(self): + with self.test_client() as c: + token = self.get_scoped_token() + # Invoke GET access_rules/{id} with unsupported query parameters and + # trigger internal validation c.get( f"/v3/users/{self.user_id}/access_rules/{access_rule_id}" "?foo=bar", diff --git a/keystone/tests/unit/test_v3_credential.py b/keystone/tests/unit/test_v3_credential.py index dd212c667f..1ae6a98d6b 100644 --- a/keystone/tests/unit/test_v3_credential.py +++ b/keystone/tests/unit/test_v3_credential.py @@ -90,10 +90,15 @@ def _test_get_token(self, access, secret): 'path': '/bar', 'params': params, } + PROVIDERS.assignment_api.create_system_grant_for_user( + self.user_id, self.role_id + ) + token = self.get_system_scoped_token() r = self.post( '/ec2tokens', body={'ec2Credentials': sig_ref}, expected_status=http.client.OK, + token=token, ) self.assertValidTokenResponse(r) return r.result['token'] diff --git a/keystone/tests/unit/test_v3_resource.py b/keystone/tests/unit/test_v3_resource.py index ced9419874..1f0c2b8db7 100644 --- a/keystone/tests/unit/test_v3_resource.py +++ b/keystone/tests/unit/test_v3_resource.py @@ -27,6 +27,9 @@ CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs +_DEFAULT_TAG = ['single_tag'] +_DEFAULT_TAGS = [None, [], ['vc-a-0', 'tag_1', 'tag_2'], _DEFAULT_TAG] + class ResourceTestCase(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin): """Test domains and projects.""" @@ -843,7 +846,7 @@ def _create_projects_hierarchy(self, hierarchy_size=1): 
return projects - def _create_project_and_tags(self, num_of_tags=1): + def _create_project_and_tags(self, num_of_tags=1, with_default_tag=False): """Create a project and a number of tags attached to that project. :param num_of_tags: the desired number of tags created with a specified @@ -853,7 +856,16 @@ def _create_project_and_tags(self, num_of_tags=1): random tags """ tags = [uuid.uuid4().hex for i in range(num_of_tags)] - ref = unit.new_project_ref(domain_id=self.domain_id, tags=tags) + + if num_of_tags > 0: + if with_default_tag: + # remove the last tag + tags.pop() + # add default tag instead + tags += _DEFAULT_TAG + ref = unit.new_project_ref(domain_id=self.domain_id, tags=tags) + else: + ref = unit.new_project_without_tags_ref(domain_id=self.domain_id) resp = self.post('/projects', body={'project': ref}) return resp.result['project'], tags @@ -1382,6 +1394,50 @@ def test_list_project_is_domain_filter(self): new_regular_project['id'], [p['id'] for p in r.result['projects']] ) + def test_list_projects_is_domain_filter_domain_scoped_token(self): + """Call ``GET /projects?is_domain=True/False`` with domain scope.""" + # grant the domain role to user + path = '/domains/{}/users/{}/roles/{}'.format( + self.domain_id, self.user['id'], self.role['id'] + ) + self.put(path=path) + + auth = self.build_authentication_request( + user_id=self.user['id'], + password=self.user['password'], + domain_id=self.domain_id, + ) + + # Check that listing the domains does not result in an empty list + new_is_domain_project = unit.new_project_ref(is_domain=True) + new_is_domain_project = PROVIDERS.resource_api.create_project( + new_is_domain_project['id'], new_is_domain_project + ) + + r = self.get( + '/projects?is_domain=True', auth=auth, expected_status=200 + ) + self.assertIn( + new_is_domain_project['id'], + [p['id'] for p in r.result['projects']], + ) + + # Check that the projects are still being filtered + # The previously created is_domain project is a domain, so + # we can 
reuse it for the project + new_regular_project = unit.new_project_ref( + is_domain=False, domain_id=new_is_domain_project['id'] + ) + new_regular_project = PROVIDERS.resource_api.create_project( + new_regular_project['id'], new_regular_project + ) + r = self.get( + '/projects?is_domain=False', auth=auth, expected_status=200 + ) + self.assertNotIn( + new_regular_project['id'], [p['id'] for p in r.result['projects']] + ) + def test_list_project_is_domain_filter_default(self): """Default project list should not see projects acting as domains.""" # Get the initial count of regular projects @@ -1760,14 +1816,23 @@ def test_delete_not_leaf_project(self): ) def test_create_project_with_tags(self): - project, tags = self._create_project_and_tags(num_of_tags=10) - ref = self.get( - '/projects/{project_id}'.format(project_id=project['id']), - expected_status=http.client.OK, - ) - self.assertIn('tags', ref.result['project']) - for tag in tags: - self.assertIn(tag, ref.result['project']['tags']) + for config_setting in _DEFAULT_TAGS: + if config_setting is not None: + self.config_fixture.config(default_tag=config_setting) + for tag_number in [0, 10]: + project, tags = self._create_project_and_tags( + num_of_tags=tag_number, with_default_tag=True + ) + ref = self.get( + '/projects/{project_id}'.format(project_id=project['id']), + expected_status=http.client.OK, + ) + self.assertIn('tags', ref.result['project']) + for tag in tags: + self.assertIn(tag, ref.result['project']['tags']) + if config_setting is not None: + for tag in config_setting: + self.assertIn(tag, ref.result['project']['tags']) def test_update_project_with_tags(self): project, tags = self._create_project_and_tags(num_of_tags=9) diff --git a/keystone/trust/schema.py b/keystone/trust/schema.py index 3d34987857..20c85b3907 100644 --- a/keystone/trust/schema.py +++ b/keystone/trust/schema.py @@ -170,7 +170,10 @@ "the trust.", }, }, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we 
have schemas for all + # resources. Doing so will remove comparator (trustor_user_id__icontains) + # support. + "additionalProperties": True, } trust_index_response_body: dict[str, Any] = { @@ -190,7 +193,9 @@ trust_request_query: dict[str, Any] = { "type": "object", "properties": {}, - "additionalProperties": False, + # TODO(stephenfin): Change this to False once we have schemas for all + # resources. + "additionalProperties": True, } trust_response_body: dict[str, Any] = { diff --git a/releasenotes/notes/bug-1794527-866b1caff67977f3.yaml b/releasenotes/notes/bug-1794527-866b1caff67977f3.yaml index 6a8134fd24..04435288d6 100644 --- a/releasenotes/notes/bug-1794527-866b1caff67977f3.yaml +++ b/releasenotes/notes/bug-1794527-866b1caff67977f3.yaml @@ -18,4 +18,4 @@ features: the old one, and having spurious failures, or undesirecd domain_id matching. - https://specs.openstack.org/openstack/keystone-specs/specs/keystone/stein/explicit-domains-ids.html \ No newline at end of file + https://specs.openstack.org/openstack/keystone-specs/specs/keystone/train/explicit-domains-ids.html diff --git a/releasenotes/notes/migrate-deprecated-password-hashes-0ffdf9eefb9ff872.yaml b/releasenotes/notes/migrate-deprecated-password-hashes-0ffdf9eefb9ff872.yaml new file mode 100644 index 0000000000..9661892fb3 --- /dev/null +++ b/releasenotes/notes/migrate-deprecated-password-hashes-0ffdf9eefb9ff872.yaml @@ -0,0 +1,13 @@ +--- +critical: + - | + The Epoxy release of Keystone drops support for passwords that were hashed + using the sha512_crypt algorithm, which was the default in Ocata and + earlier. As a result any passwords that are hashed with sha512_crypt will + not be readable in Epoxy. This release adds the ability for the SQL + identity backend to update passwords that were hashed with sha512_crypt. 
+ The rehashing is done when the user authenticates with the correct password, + so if you have user passwords that are hashed with the sha512_crypt hash + then users will need to authenticate at least once before Keystone is + upgraded to Epoxy. Users who still have passwords hashed in sha512_crypt + after Keystone is upgraded to Epoxy will need to perform a password reset. diff --git a/releasenotes/notes/pci-dss-invalid-password-reporting-975955d2d79c21b3.yaml b/releasenotes/notes/pci-dss-invalid-password-reporting-975955d2d79c21b3.yaml new file mode 100644 index 0000000000..2e15cf7d42 --- /dev/null +++ b/releasenotes/notes/pci-dss-invalid-password-reporting-975955d2d79c21b3.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + `bug 2060972 <https://bugs.launchpad.net/keystone/+bug/2060972>`_ + Added a new configuration option ``[security_compliance] + report_invalid_password_hash`` to enable and configure reporting of hashes + of submitted invalid passwords, which could be used to facilitate analysis + of failed login attempts (as per PCI DSS requirements). The corresponding + Keystone specification - `Include invalid password details in audit + messages `_.
diff --git a/requirements.txt b/requirements.txt index 3b03fc21d4..6af650405c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -37,3 +37,7 @@ jsonschema>=3.2.0 # MIT pycadf!=2.0.0,>=1.1.0 # Apache-2.0 msgpack>=0.5.0 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 + +python-ldap>=3.0.0 # PSF +ldappool>=2.3.1 # MPL +python-memcached>=1.56 # PSF diff --git a/tox.ini b/tox.ini index 06e8354ab8..18f7307a5b 100644 --- a/tox.ini +++ b/tox.ini @@ -11,14 +11,15 @@ setenv = # TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0 SQLALCHEMY_WARN_20=1 deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -c{env:TOX_CONSTRAINTS_FILE:https://raw.githubusercontent.com/sapcc/requirements/stable/2025.1-m3/upper-constraints.txt} -r{toxinidir}/test-requirements.txt + -r{toxinidir}/custom-requirements.txt .[ldap,memcache] commands = stestr run {posargs} allowlist_externals = bash -passenv = http_proxy,HTTP_PROXY,https_proxy,HTTPS_PROXY,no_proxy,NO_PROXY,PBR_VERSION +passenv = http_proxy,HTTP_PROXY,https_proxy,HTTPS_PROXY,no_proxy,NO_PROXY,PBR_VERSION,WATCHER_DISABLED [testenv:pep8] deps = @@ -41,7 +42,7 @@ allowlist_externals = {toxinidir}/tools/fast8.sh # NOTE(browne): This is required for the integration test job of the bandit # project. Please do not remove. 
deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.1} -r{toxinidir}/requirements.txt commands = bandit -r keystone -x 'keystone/tests/*' @@ -71,7 +72,7 @@ passenv = KSTEST_* [testenv:functional] deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.1} -r{toxinidir}/test-requirements.txt setenv = OS_TEST_PATH=./keystone/tests/functional commands = @@ -107,7 +108,7 @@ max-complexity = 24 [testenv:docs] deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.1} -r{toxinidir}/doc/requirements.txt .[ldap,memcache] commands= @@ -169,6 +170,12 @@ paths = ./keystone/tests/hacking deps = bindep commands = bindep test +[testenv:lower-constraints] +deps = + -c{toxinidir}/lower-constraints.txt + -r{toxinidir}/test-requirements.txt + .[ldap,memcache] + [testenv:protection] commands = stestr run --test-path=./keystone/tests/protection {posargs}