Add int storage of datetime for password created/expires

Due to MySQL (in some versions) not storing datetime resolution below
one second, keystone occasionally ends up with weird behavior such as
a new password not being valid. The password created at and expires at
columns now store both datetime (for rolling upgrades) and integers.

Keystone from Pike and beyond leans on the new created_at_int column
and expires_at_int column.

Change-Id: I2c219b4b9b353f1e2cce6088849a773196f0e443
Closes-Bug: #1702211
This commit is contained in:
Morgan Fainberg 2017-08-11 14:56:30 -07:00 committed by Lance Bragstad
parent 2164d0550c
commit 38974af24c
11 changed files with 298 additions and 4 deletions

View File

@ -0,0 +1,61 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import pytz
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
# Microseconds-since-epoch reference point. Timezone-aware (UTC) so that
# subtraction against UTC-stamped datetimes is well defined. Uses the
# stdlib timezone instead of pytz -- equivalent for plain UTC.
_epoch = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc)


def _convert_value_datetime_to_int(dt):
    """Convert a datetime object to microseconds since the UNIX epoch.

    :param dt: datetime read from the password table. Assumed to be naive
        UTC (the DB stores naive datetimes); any tzinfo already present is
        overwritten with UTC rather than converted -- TODO confirm no
        aware, non-UTC values can reach this helper.
    :returns: int count of microseconds since 1970-01-01T00:00:00Z
    """
    dt = dt.replace(tzinfo=datetime.timezone.utc)
    # Shift the decimal six places before truncating to int so the
    # sub-second (microsecond) precision survives the conversion.
    return int((dt - _epoch).total_seconds() * 1000000)
def upgrade(migrate_engine):
    """Backfill created_at_int/expires_at_int on the password table.

    Contract-phase migration: copies every row's ``created_at`` and
    ``expires_at`` datetimes into the new integer (microseconds since
    epoch) columns, then makes ``created_at_int`` non-nullable.

    :param migrate_engine: SQLAlchemy engine bound to the keystone DB
    """
    meta = sql.MetaData()
    meta.bind = migrate_engine
    maker = sessionmaker(bind=migrate_engine)
    session = maker()
    password_table = sql.Table('password', meta, autoload=True)
    # Materialize all rows up front so the update statements below do not
    # run against an open result cursor.
    passwords = list(password_table.select().execute())
    for passwd in passwords:
        values = {
            'created_at_int': _convert_value_datetime_to_int(passwd.created_at)
        }
        # expires_at is nullable (a password may never expire); only
        # convert when present so NULL stays NULL in expires_at_int.
        if passwd.expires_at is not None:
            values['expires_at_int'] = _convert_value_datetime_to_int(
                passwd.expires_at)
        update = password_table.update().where(
            password_table.c.id == passwd.id).values(values)
        session.execute(update)
    # Commit the backfill before altering the column so no row is still
    # NULL when the NOT NULL constraint lands.
    session.commit()
    password_table = sql.Table('password', meta, autoload=True)
    # The created_at_int data cannot really be nullable long term. This
    # corrects the data to be not nullable, but must be done in the contract
    # phase for two reasons. The first is due to "additive only" requirements.
    # The second is because we need to ensure all nodes in the deployment are
    # running the Pike code-base before we migrate all password entries. This
    # avoids locking the password table or having a partial outage while doing
    # the migration.
    password_table.c.created_at_int.alter(nullable=False, default=0,
                                          server_default='0')
    session.close()

View File

@ -18,7 +18,9 @@ Before using this module, call initialize(). This has to be done before
CONF() because it sets up configuration options.
"""
import datetime
import functools
import pytz
from oslo_db import exception as db_exception
from oslo_db import options as db_options
@ -26,6 +28,7 @@ from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import models
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from osprofiler import opts as profiler
import osprofiler.sqlalchemy
import six
@ -123,6 +126,51 @@ class JsonBlob(sql_types.TypeDecorator):
return jsonutils.loads(value)
class DateTimeInt(sql_types.TypeDecorator):
    """Store a datetime as microseconds-since-epoch in a BigInteger.

    Keystone relies on accurate (sub-second) datetime objects, but some
    RDBMS versions (certain MySQL releases) drop sub-second precision from
    DATETIME columns. This decorator writes the value to the database as
    an integer count of microseconds since the UNIX epoch and rebuilds the
    datetime object when the value is loaded.

    NOTE: a timezone-aware datetime is converted to UTC before storage; a
    naive datetime is assumed to already be UTC and is loaded from the DB
    as such.
    """

    impl = sql.BigInteger
    epoch = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)

    def process_bind_param(self, value, dialect):
        if value is None:
            return None
        if not isinstance(value, datetime.datetime):
            raise ValueError(_('Programming Error: value to be stored '
                               'must be a datetime object.'))
        # Normalize to UTC, then stamp the tzinfo so the subtraction
        # against the timezone-aware epoch is well defined.
        utc_value = timeutils.normalize_time(value)
        utc_value = utc_value.replace(tzinfo=pytz.UTC)
        # NOTE(morgan): Cast to an int while shifting the decimal point so
        # microsecond data is preserved. This is easier than being
        # concerned with the differences in Numeric types across SQL
        # backends.
        return int((utc_value - self.epoch).total_seconds() * 1000000)

    def process_result_value(self, value, dialect):
        if value is None:
            return None
        # Undo the microsecond shift: a float of seconds since the epoch
        # with the microseconds after the decimal point.
        seconds = float(value) / 1000000
        # NOTE(morgan): Explicitly use pytz.UTC so the stored instant is
        # not shifted while the datetime object is rebuilt.
        restored = datetime.datetime.fromtimestamp(seconds, tz=pytz.UTC)
        # Keystone expects naive (tz-stripped) datetime objects.
        return timeutils.normalize_time(restored)
class ModelDictMixinWithExtras(models.ModelBase):
"""Mixin making model behave with dict-like interfaces includes extras.

View File

@ -0,0 +1,22 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def upgrade(migrate_engine):
    """Expand phase for the password integer created/expires columns.

    Deliberately a no-op: marshalling data from the old datetime columns
    into the new integer columns happens in the contract phase instead,
    because converting datetimes to integers with triggers is complex and
    error-prone. The data is migrated once every keystone node runs the
    Pike code-base, so from an operator perspective a rolling upgrade is
    unaffected -- all nodes must be on Pike before the contract runs.

    :param migrate_engine: unused; present for the migration interface
    """
    pass

View File

@ -0,0 +1,33 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
from keystone.common import sql as ks_sql
def upgrade(migrate_engine):
    """Add nullable created_at_int/expires_at_int to the password table.

    NOTE(morgan): the columns are nullable here for migration purposes;
    the contract phase sets created_at_int to not-nullable so rolling
    upgrades are handled sanely. This intentionally differs from the
    model in keystone.identity.backends.sql_model.
    """
    meta = sql.MetaData()
    meta.bind = migrate_engine
    password_table = sql.Table('password', meta, autoload=True)
    # created_at_int first, then expires_at_int (same order as the model).
    for column_name in ('created_at_int', 'expires_at_int'):
        password_table.create_column(
            sql.Column(column_name, ks_sql.DateTimeInt(), nullable=True))

View File

@ -278,7 +278,7 @@ class LocalUser(sql.ModelBase, sql.ModelDictMixin):
cascade='all,delete-orphan',
lazy='subquery',
backref='local_user',
order_by='Password.created_at')
order_by='Password.created_at_int')
failed_auth_count = sql.Column(sql.Integer, nullable=True)
failed_auth_at = sql.Column(sql.DateTime, nullable=True)
__table_args__ = (
@ -303,13 +303,40 @@ class Password(sql.ModelBase, sql.ModelDictMixin):
password = sql.Column(sql.String(128), nullable=True)
password_hash = sql.Column(sql.String(255), nullable=True)
# TODO(lbragstad): Once Rocky opens for development, the _created_at and
# _expires_at attributes/columns can be removed from the schema. The
# migration ensures all passwords are converted from datetime objects to
# big integers. The old datetime columns and their corresponding attributes
# in the model are no longer required.
# created_at default set here to safe guard in case it gets missed
created_at = sql.Column(sql.DateTime, nullable=False,
default=datetime.datetime.utcnow)
expires_at = sql.Column(sql.DateTime, nullable=True)
_created_at = sql.Column('created_at', sql.DateTime, nullable=False,
default=datetime.datetime.utcnow)
_expires_at = sql.Column('expires_at', sql.DateTime, nullable=True)
# set the default to 0, a 0 indicates it is unset.
created_at_int = sql.Column(sql.DateTimeInt(), nullable=False,
default=datetime.datetime.utcnow)
expires_at_int = sql.Column(sql.DateTimeInt(), nullable=True)
self_service = sql.Column(sql.Boolean, default=False, nullable=False,
server_default='0')
@hybrid_property
def created_at(self):
    """Creation time: integer-backed column, legacy datetime fallback."""
    # Prefer the integer-backed column; rows written before the Pike
    # migration only have the legacy datetime column populated.
    value = self.created_at_int
    if not value:
        value = self._created_at
    return value

@created_at.setter
def created_at(self, value):
    # Write both columns so old and new code paths stay consistent until
    # the legacy datetime column is dropped.
    self.created_at_int = value
    self._created_at = value
@hybrid_property
def expires_at(self):
    """Expiry time: integer-backed column, legacy datetime fallback."""
    # Prefer the integer-backed column; fall back to the legacy datetime
    # column for rows written before the Pike migration.
    value = self.expires_at_int
    if not value:
        value = self._expires_at
    return value

@expires_at.setter
def expires_at(self, value):
    # Keep both columns in sync until the legacy column is removed.
    self.expires_at_int = value
    self._expires_at = value
class FederatedUser(sql.ModelBase, sql.ModelDictMixin):
__tablename__ = 'federated_user'

View File

@ -31,6 +31,28 @@ from keystone.tests.unit import test_backend_sql
CONF = keystone.conf.CONF
class UserPasswordCreatedAtIntTests(test_backend_sql.SqlTests):
    """Verify the integer and datetime password columns stay in sync."""

    def config_overrides(self):
        super(UserPasswordCreatedAtIntTests, self).config_overrides()
        # Force password expiry so _expires_at/expires_at_int get set.
        self.config_fixture.config(group='security_compliance',
                                   password_expires_days=1)

    def test_user_password_created_expired_at_int_matches_created_at(self):
        with sql.session_for_read() as session:
            user_ref = self.identity_api._get_user(session,
                                                   self.user_foo['id'])
            password_ref = user_ref.password_ref
            self.assertIsNotNone(password_ref._created_at)
            self.assertIsNotNone(password_ref._expires_at)
            # The legacy datetime columns and the new integer-backed
            # columns must hold the same instants.
            self.assertEqual(password_ref._created_at,
                             password_ref.created_at_int)
            self.assertEqual(password_ref._expires_at,
                             password_ref.expires_at_int)
            # The hybrid properties must resolve to the integer columns.
            self.assertEqual(password_ref.created_at,
                             password_ref.created_at_int)
            self.assertEqual(password_ref.expires_at,
                             password_ref.expires_at_int)
class UserPasswordHashingTestsNoCompat(test_backend_sql.SqlTests):
def config_overrides(self):
super(UserPasswordHashingTestsNoCompat, self).config_overrides()

View File

@ -157,6 +157,8 @@ class SqlModels(SqlTests):
('password_hash', sql.String, 255),
('created_at', sql.DateTime, None),
('expires_at', sql.DateTime, None),
('created_at_int', sql.DateTimeInt, None),
('expires_at_int', sql.DateTimeInt, None),
('self_service', sql.Boolean, False))
self.assertExpectedSchema('password', cols)

View File

@ -38,6 +38,7 @@ WARNING::
all data will be lost.
"""
import datetime
import json
import os
import uuid
@ -50,6 +51,7 @@ import mock
from oslo_db import exception as db_exception
from oslo_db.sqlalchemy import test_base
from oslo_log import log
import pytz
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from testtools import matchers
@ -2353,6 +2355,72 @@ class FullMigration(SqlMigrateBase, unit.TestCase):
self.assertTableColumns(user_option,
['user_id', 'option_id', 'option_value'])
def test_migration_024_add_created_expires_at_int_columns_password(self):
    """Exercise expand/migrate/contract for migration 024 end to end."""
    # Baseline: through migration 023 the password table has only the
    # datetime columns.
    self.expand(23)
    self.migrate(23)
    self.contract(23)
    password_table_name = 'password'
    self.assertTableColumns(
        password_table_name,
        ['id', 'local_user_id', 'password', 'password_hash', 'created_at',
         'expires_at', 'self_service']
    )
    # Expand 024 adds the (nullable) integer columns without moving data.
    self.expand(24)
    self.assertTableColumns(
        password_table_name,
        ['id', 'local_user_id', 'password', 'password_hash', 'created_at',
         'expires_at', 'created_at_int', 'expires_at_int', 'self_service']
    )
    # Create User and Local User
    project_table = sqlalchemy.Table('project', self.metadata,
                                     autoload=True)
    domain_data = {'id': '_domain', 'domain_id': '_domain',
                   'enabled': True, 'name': '_domain', 'is_domain': True}
    project_table.insert().values(domain_data).execute()
    user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
    user_id = uuid.uuid4().hex
    user = {'id': user_id, 'enabled': True, 'domain_id': domain_data['id']}
    user_table.insert().values(user).execute()
    local_user_table = sqlalchemy.Table('local_user', self.metadata,
                                        autoload=True)
    local_user = {
        'id': 1, 'user_id': user_id, 'domain_id': user['domain_id'],
        'name': 'name'}
    local_user_table.insert().values(local_user).execute()
    password_table = sqlalchemy.Table('password',
                                      self.metadata, autoload=True)
    # Seed a password row with datetimes only, as pre-Pike code would.
    password_data = {
        'local_user_id': local_user['id'],
        'created_at': datetime.datetime.utcnow(),
        'expires_at': datetime.datetime.utcnow()}
    password_table.insert().values(password_data).execute()
    self.migrate(24)
    self.contract(24)
    # Contract 024 backfills the integer columns; each value must equal
    # the datetime converted to microseconds since the UNIX epoch.
    passwords = list(password_table.select().execute())
    epoch = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)
    for p in passwords:
        c = (p.created_at.replace(tzinfo=pytz.UTC) - epoch).total_seconds()
        e = (p.expires_at.replace(tzinfo=pytz.UTC) - epoch).total_seconds()
        self.assertEqual(p.created_at_int, int(c * 1000000))
        self.assertEqual(p.expires_at_int, int(e * 1000000))
    # Test contract phase and ensure data can not be null
    self.contract(24)
    meta = sqlalchemy.MetaData(self.engine)
    pw_table = sqlalchemy.Table('password', meta, autoload=True)
    self.assertFalse(pw_table.c.created_at_int.nullable)
class MySQLOpportunisticFullMigration(FullMigration):
    """Run the full migration tests against an opportunistic MySQL DB."""

    FIXTURE = test_base.MySQLOpportunisticFixture

View File

@ -0,0 +1,9 @@
---
upgrade:
- |
[`bug 1702211 <https://bugs.launchpad.net/keystone/+bug/1702211>`_]
Password `created_at` field under some versions/deployments of MySQL would
lose sub-second precision. This means that it was possible for passwords to
be returned out-of-order when changed within one second (especially common
in testing). This change stores password `created_at` and `expires_at` as
integers instead of as a DATETIME data-type.

View File

@ -40,3 +40,4 @@ jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
pycadf!=2.0.0,>=1.1.0 # Apache-2.0
msgpack-python>=0.4.0 # Apache-2.0
osprofiler>=1.4.0 # Apache-2.0
pytz>=2013.6 # MIT

View File

@ -8,6 +8,7 @@ flake8-docstrings==0.2.1.post1 # MIT
bashate>=0.2 # Apache-2.0
os-testr>=0.8.0 # Apache-2.0
freezegun>=0.3.6 # Apache-2.0
pytz>=2013.6 # MIT
# Include drivers for opportunistic testing.
oslo.db[fixtures,mysql,postgresql]>=4.24.0 # Apache-2.0