Replaces functions in utils.py with openstack/common/timeutils.py

Fixes bug #1008628

1. Edit openstack-common.conf and import nova/openstack/common/timeutils.py
2. Move time related functions from utils.py to timeutils.py
3. Replace the following functions in utils.py with their timeutils.py equivalents
- isotime
- parse_isotime
- strtime
- parse_strtime
- normalize_time
- is_older_than
- utcnow_ts
- utcnow
- set_time_override
- advance_time_delta
- advance_time_seconds
- clear_time_override
4. Remove datetime related functions and datetime related unittests

Change-Id: I9a92be286fb071b6237dd39495d88dae106e2ce0
This commit is contained in:
Zhongyue Luo 2012-06-06 10:32:49 +08:00
parent fb9abcc839
commit 9ff3121bd9
51 changed files with 380 additions and 419 deletions

View File

@ -90,6 +90,7 @@ from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import quota
from nova import rpc
from nova.scheduler import rpcapi as scheduler_rpcapi
@ -968,7 +969,7 @@ class ServiceCommands(object):
Show a list of all running services. Filter by host & service name.
"""
ctxt = context.get_admin_context()
now = utils.utcnow()
now = timeutils.utcnow()
services = db.service_get_all(ctxt)
if host:
services = [s for s in services if s['host'] == host]
@ -1083,7 +1084,7 @@ class HostCommands(object):
print "%-25s\t%-15s" % (_('host'),
_('zone'))
ctxt = context.get_admin_context()
now = utils.utcnow()
now = timeutils.utcnow()
services = db.service_get_all(ctxt)
if zone:
services = [s for s in services if s['availability_zone'] == zone]

View File

@ -38,6 +38,7 @@ from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import utils
from nova import wsgi
@ -106,7 +107,7 @@ class RequestLogging(wsgi.Middleware):
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
start = utils.utcnow()
start = timeutils.utcnow()
rv = req.get_response(self.application)
self.log_request_completion(rv, req, start)
return rv
@ -120,7 +121,7 @@ class RequestLogging(wsgi.Middleware):
controller = None
action = None
ctxt = request.environ.get('nova.context', None)
delta = utils.utcnow() - start
delta = timeutils.utcnow() - start
seconds = delta.seconds
microseconds = delta.microseconds
LOG.info(

View File

@ -40,6 +40,7 @@ from nova.image import s3
from nova import log as logging
from nova import network
from nova.openstack.common import excutils
from nova.openstack.common import timeutils
from nova import quota
from nova import utils
from nova import volume
@ -666,7 +667,7 @@ class CloudController(object):
instance_id = ec2utils.ec2_id_to_id(ec2_id)
instance = self.compute_api.get(context, instance_id)
output = self.compute_api.get_console_output(context, instance)
now = utils.utcnow()
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"output": base64.b64encode(output)}

View File

@ -29,6 +29,7 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import network
from nova.openstack.common import timeutils
from nova import utils
@ -90,7 +91,7 @@ class CloudpipeController(object):
rv['state'] = 'pending'
return rv
rv['instance_id'] = instance['uuid']
rv['created_at'] = utils.isotime(instance['created_at'])
rv['created_at'] = timeutils.isotime(instance['created_at'])
nw_info = compute_utils.get_nw_info_for_instance(instance)
if not nw_info:
return rv

View File

@ -24,7 +24,7 @@ from nova.api.openstack import xmlutil
from nova.compute import api
from nova import exception
from nova import flags
from nova import utils
from nova.openstack.common import timeutils
FLAGS = flags.FLAGS
@ -73,13 +73,13 @@ class SimpleTenantUsageController(object):
terminated_at = instance['terminated_at']
if terminated_at is not None:
if not isinstance(terminated_at, datetime.datetime):
terminated_at = utils.parse_strtime(terminated_at,
"%Y-%m-%d %H:%M:%S.%f")
terminated_at = timeutils.parse_strtime(terminated_at,
"%Y-%m-%d %H:%M:%S.%f")
if launched_at is not None:
if not isinstance(launched_at, datetime.datetime):
launched_at = utils.parse_strtime(launched_at,
"%Y-%m-%d %H:%M:%S.%f")
launched_at = timeutils.parse_strtime(launched_at,
"%Y-%m-%d %H:%M:%S.%f")
if terminated_at and terminated_at < period_start:
return 0
@ -152,7 +152,7 @@ class SimpleTenantUsageController(object):
else:
info['state'] = instance['vm_state']
now = utils.utcnow()
now = timeutils.utcnow()
if info['state'] == 'terminated':
delta = info['ended_at'] - info['started_at']
@ -188,16 +188,16 @@ class SimpleTenantUsageController(object):
def _parse_datetime(self, dtstr):
if not dtstr:
return utils.utcnow()
return timeutils.utcnow()
elif isinstance(dtstr, datetime.datetime):
return dtstr
try:
return utils.parse_strtime(dtstr, "%Y-%m-%dT%H:%M:%S")
return timeutils.parse_strtime(dtstr, "%Y-%m-%dT%H:%M:%S")
except Exception:
try:
return utils.parse_strtime(dtstr, "%Y-%m-%dT%H:%M:%S.%f")
return timeutils.parse_strtime(dtstr, "%Y-%m-%dT%H:%M:%S.%f")
except Exception:
return utils.parse_strtime(dtstr, "%Y-%m-%d %H:%M:%S.%f")
return timeutils.parse_strtime(dtstr, "%Y-%m-%d %H:%M:%S.%f")
def _get_datetime_range(self, req):
qs = req.environ.get('QUERY_STRING', '')

View File

@ -32,6 +32,7 @@ from nova.compute import instance_types
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import timeutils
from nova.rpc import common as rpc_common
from nova import utils
@ -426,7 +427,7 @@ class Controller(wsgi.Controller):
if 'changes-since' in search_opts:
try:
parsed = utils.parse_isotime(search_opts['changes-since'])
parsed = timeutils.parse_isotime(search_opts['changes-since'])
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)

View File

@ -20,7 +20,7 @@ from lxml import etree
from nova.api.openstack.compute.views import versions as views_versions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import utils
from nova.openstack.common import timeutils
LINKS = {
@ -124,8 +124,8 @@ class AtomSerializer(wsgi.XMLDictSerializer):
def _get_most_recent_update(self, versions):
recent = None
for version in versions:
updated = utils.parse_strtime(version['updated'],
'%Y-%m-%dT%H:%M:%SZ')
updated = timeutils.parse_strtime(version['updated'],
'%Y-%m-%dT%H:%M:%SZ')
if not recent:
recent = updated
elif updated > recent:

View File

@ -17,7 +17,7 @@
import datetime
from nova import utils
from nova.openstack.common import timeutils
class ViewBuilder(object):
@ -92,5 +92,5 @@ class ViewBuilder(object):
"value": rate_limit["value"],
"remaining": int(rate_limit["remaining"]),
"unit": rate_limit["unit"],
"next-available": utils.isotime(at=next_avail),
"next-available": timeutils.isotime(at=next_avail),
}

View File

@ -23,7 +23,7 @@ from nova.api.openstack.compute.views import addresses as views_addresses
from nova.api.openstack.compute.views import flavors as views_flavors
from nova.api.openstack.compute.views import images as views_images
from nova import log as logging
from nova import utils
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
@ -99,8 +99,8 @@ class ViewBuilder(common.ViewBuilder):
"hostId": self._get_host_id(instance) or "",
"image": self._get_image(request, instance),
"flavor": self._get_flavor(request, instance),
"created": utils.isotime(instance["created_at"]),
"updated": utils.isotime(instance["updated_at"]),
"created": timeutils.isotime(instance["created_at"]),
"updated": timeutils.isotime(instance["updated_at"]),
"addresses": self._get_addresses(request, instance),
"accessIPv4": instance.get("access_ip_v4") or "",
"accessIPv6": instance.get("access_ip_v6") or "",
@ -199,7 +199,7 @@ class ViewBuilder(common.ViewBuilder):
fault_dict = {
"code": fault["code"],
"created": utils.isotime(fault["created_at"]),
"created": timeutils.isotime(fault["created_at"]),
"message": fault["message"],
}

View File

@ -18,7 +18,7 @@
"""Super simple fake memcache client."""
from nova import utils
from nova.openstack.common import timeutils
class Client(object):
@ -35,7 +35,7 @@ class Client(object):
for k in self.cache.keys():
(timeout, _value) = self.cache[k]
if timeout and utils.utcnow_ts() >= timeout:
if timeout and timeutils.utcnow_ts() >= timeout:
del self.cache[k]
return self.cache.get(key, (0, None))[1]
@ -44,7 +44,7 @@ class Client(object):
"""Sets the value for a key."""
timeout = 0
if time != 0:
timeout = utils.utcnow_ts() + time
timeout = timeutils.utcnow_ts() + time
self.cache[key] = (timeout, value)
return True

View File

@ -46,6 +46,7 @@ from nova import notifications
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
import nova.policy
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
@ -828,7 +829,7 @@ class API(base.Base):
self.update(context,
instance,
task_state=task_states.POWERING_OFF,
deleted_at=utils.utcnow())
deleted_at=timeutils.utcnow())
self.compute_rpcapi.power_off_instance(context, instance)
else:

View File

@ -68,6 +68,7 @@ from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import rpc
from nova import utils
from nova.virt import driver
@ -480,15 +481,15 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
if FLAGS.instance_build_timeout == 0:
timeout = FLAGS.instance_build_timeout
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING}
building_insts = self.db.instance_get_all_by_filters(context, filters)
for instance in building_insts:
if utils.is_older_than(instance['created_at'],
FLAGS.instance_build_timeout):
if timeutils.is_older_than(instance['created_at'], timeout):
self._set_instance_error_state(context, instance['uuid'])
LOG.warn(_("Instance build timed out. Set to error state."),
instance=instance)
@ -643,7 +644,7 @@ class ComputeManager(manager.SchedulerDependentManager):
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
launched_at=utils.utcnow())
launched_at=timeutils.utcnow())
def _notify_about_instance_usage(self, context, instance, event_suffix,
network_info=None, system_metadata=None,
@ -758,7 +759,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_uuid,
vm_state=vm_states.DELETED,
task_state=None,
terminated_at=utils.utcnow())
terminated_at=timeutils.utcnow())
# Pull the system_metadata before we delete the instance, so we
# can pass it to delete.end notification, as it will not be able
# to look it up anymore, if it needs it.
@ -929,7 +930,7 @@ class ComputeManager(manager.SchedulerDependentManager):
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
launched_at=utils.utcnow())
launched_at=timeutils.utcnow())
self._notify_about_instance_usage(context, instance, "rebuild.end",
network_info=network_info)
@ -1308,7 +1309,7 @@ class ComputeManager(manager.SchedulerDependentManager):
root_gb=instance_type['root_gb'],
ephemeral_gb=instance_type['ephemeral_gb'],
instance_type_id=instance_type['id'],
launched_at=utils.utcnow(),
launched_at=timeutils.utcnow(),
vm_state=vm_states.ACTIVE,
task_state=None)
@ -1460,7 +1461,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref.uuid,
vm_state=vm_states.ACTIVE,
host=migration_ref['dest_compute'],
launched_at=utils.utcnow(),
launched_at=timeutils.utcnow(),
task_state=task_states.RESIZE_VERIFY)
self.db.migration_update(context, migration_ref.id,
@ -2487,15 +2488,16 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
if FLAGS.reclaim_instance_interval <= 0:
interval = FLAGS.reclaim_instance_interval
if interval <= 0:
LOG.debug(_("FLAGS.reclaim_instance_interval <= 0, skipping..."))
return
instances = self.db.instance_get_all_by_host(context, self.host)
for instance in instances:
old_enough = (not instance.deleted_at or utils.is_older_than(
instance.deleted_at,
FLAGS.reclaim_instance_interval))
old_enough = (not instance.deleted_at or
timeutils.is_older_than(instance.deleted_at,
interval))
soft_deleted = instance.vm_state == vm_states.SOFT_DELETE
if soft_deleted and old_enough:
@ -2590,11 +2592,12 @@ class ComputeManager(manager.SchedulerDependentManager):
should be pushed down to the virt layer for efficiency.
"""
def deleted_instance(instance):
timeout = FLAGS.running_deleted_instance_timeout
present = instance.name in present_name_labels
erroneously_running = instance.deleted and present
old_enough = (not instance.deleted_at or utils.is_older_than(
instance.deleted_at,
FLAGS.running_deleted_instance_timeout))
old_enough = (not instance.deleted_at or
timeutils.is_older_than(instance.deleted_at,
timeout))
if erroneously_running and old_enough:
return True
return False

View File

@ -23,6 +23,7 @@ import copy
from nova import log as logging
from nova.openstack.common import local
from nova.openstack.common import timeutils
from nova import utils
@ -71,9 +72,9 @@ class RequestContext(object):
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = utils.utcnow()
timestamp = timeutils.utcnow()
if isinstance(timestamp, basestring):
timestamp = utils.parse_strtime(timestamp)
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if not request_id:
request_id = generate_request_id()
@ -115,7 +116,7 @@ class RequestContext(object):
'read_deleted': self.read_deleted,
'roles': self.roles,
'remote_address': self.remote_address,
'timestamp': utils.strtime(self.timestamp),
'timestamp': timeutils.strtime(self.timestamp),
'request_id': self.request_id,
'auth_token': self.auth_token,
'quota_class': self.quota_class,

View File

@ -37,6 +37,7 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import utils
@ -210,12 +211,12 @@ def revoke_certs_by_user_and_project(user_id, project_id):
def _project_cert_subject(project_id):
"""Helper to generate user cert subject."""
return FLAGS.project_cert_subject % (project_id, utils.isotime())
return FLAGS.project_cert_subject % (project_id, timeutils.isotime())
def _user_cert_subject(user_id, project_id):
"""Helper to generate user cert subject."""
return FLAGS.user_cert_subject % (project_id, user_id, utils.isotime())
return FLAGS.user_cert_subject % (project_id, user_id, timeutils.isotime())
def generate_x509_cert(user_id, project_id, bits=1024):

View File

@ -34,6 +34,7 @@ from nova.db.sqlalchemy.session import get_session
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import timeutils
from nova import utils
from sqlalchemy import and_
from sqlalchemy.exc import IntegrityError
@ -1080,7 +1081,7 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time):
filter(models.FixedIp.id.in_(fixed_ip_ids)).\
update({'instance_id': None,
'leased': False,
'updated_at': utils.utcnow()},
'updated_at': timeutils.utcnow()},
synchronize_session='fetch')
return result
@ -1370,29 +1371,29 @@ def instance_destroy(context, instance_uuid, constraint=None):
if constraint is not None:
query = constraint.apply(models.Instance, query)
count = query.update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
if count == 0:
raise exception.ConstraintNotMet()
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_ref['uuid']).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.InstanceMetadata).\
filter_by(instance_uuid=instance_ref['uuid']).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.InstanceSystemMetadata).\
filter_by(instance_uuid=instance_ref['uuid']).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.BlockDeviceMapping).\
filter_by(instance_uuid=instance_ref['uuid']).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
instance_info_cache_delete(context, instance_ref['uuid'],
@ -1487,7 +1488,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir):
filters = filters.copy()
if 'changes-since' in filters:
changes_since = utils.normalize_time(filters['changes-since'])
changes_since = timeutils.normalize_time(filters['changes-since'])
query_prefix = query_prefix.\
filter(models.Instance.updated_at > changes_since)
@ -1640,7 +1641,8 @@ def instance_get_floating_address(context, instance_id):
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window, session=None):
reboot_window = utils.utcnow() - datetime.timedelta(seconds=reboot_window)
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
if not session:
session = get_session()
@ -1765,7 +1767,7 @@ def instance_remove_security_group(context, instance_uuid, security_group_id):
filter_by(instance_uuid=instance_ref['uuid']).\
filter_by(security_group_id=security_group_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -1848,7 +1850,7 @@ def instance_info_cache_delete(context, instance_uuid, session=None):
:param session: = optional session object
"""
values = {'deleted': True,
'deleted_at': utils.utcnow()}
'deleted_at': timeutils.utcnow()}
instance_info_cache_update(context, instance_uuid, values, session)
@ -1880,7 +1882,7 @@ def key_pair_destroy_all_by_user(context, user_id):
session.query(models.KeyPair).\
filter_by(user_id=user_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -2018,7 +2020,7 @@ def network_delete_safe(context, network_id):
filter_by(deleted=False).\
update({'deleted': True,
'updated_at': literal_column('updated_at'),
'deleted_at': utils.utcnow()})
'deleted_at': timeutils.utcnow()})
session.delete(network_ref)
@ -2626,7 +2628,7 @@ def quota_reserve(context, resources, quotas, deltas, expire,
if usages[resource].until_refresh <= 0:
refresh = True
elif max_age and (usages[resource].updated_at -
utils.utcnow()).seconds >= max_age:
timeutils.utcnow()).seconds >= max_age:
refresh = True
# OK, refresh the usage
@ -2798,9 +2800,10 @@ def quota_destroy_all_by_project(context, project_id):
def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
results = model_query(context, models.Reservation, session=session,
read_deleted="no").\
filter(models.Reservation.expire < utils.utcnow()).\
filter(models.Reservation.expire < current_time).\
all()
if results:
@ -2849,7 +2852,7 @@ def volume_attached(context, volume_id, instance_uuid, mountpoint):
volume_ref['mountpoint'] = mountpoint
volume_ref['attach_status'] = 'attached'
volume_ref['instance_uuid'] = instance_uuid
volume_ref['attach_time'] = utils.utcnow()
volume_ref['attach_time'] = timeutils.utcnow()
volume_ref.save(session=session)
@ -2892,7 +2895,7 @@ def volume_destroy(context, volume_id):
session.query(models.Volume).\
filter_by(id=volume_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.IscsiTarget).\
filter_by(volume_id=volume_id).\
@ -2900,7 +2903,7 @@ def volume_destroy(context, volume_id):
session.query(models.VolumeMetadata).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
return volume_ref
@ -3115,7 +3118,7 @@ def volume_metadata_delete(context, volume_id, key):
_volume_metadata_get_query(context, volume_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -3192,7 +3195,7 @@ def snapshot_destroy(context, snapshot_id):
session.query(models.Snapshot).\
filter_by(id=snapshot_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -3291,7 +3294,7 @@ def block_device_mapping_update_or_create(context, values):
filter(models.BlockDeviceMapping.device_name !=
values['device_name']).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -3309,7 +3312,7 @@ def block_device_mapping_destroy(context, bdm_id):
session.query(models.BlockDeviceMapping).\
filter_by(id=bdm_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -3322,7 +3325,7 @@ def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
filter_by(instance_uuid=instance_uuid).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -3433,17 +3436,17 @@ def security_group_destroy(context, security_group_id):
session.query(models.SecurityGroup).\
filter_by(id=security_group_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(security_group_id=security_group_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupIngressRule).\
filter_by(group_id=security_group_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -3544,7 +3547,7 @@ def provider_fw_rule_destroy(context, rule_id):
session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -3818,7 +3821,7 @@ def migration_get_by_instance_and_status(context, instance_uuid, status):
@require_admin_context
def migration_get_all_unconfirmed(context, confirm_window, session=None):
confirm_window = (utils.utcnow() -
confirm_window = (timeutils.utcnow() -
datetime.timedelta(seconds=confirm_window))
return model_query(context, models.Migration, session=session,
@ -4069,12 +4072,12 @@ def instance_type_destroy(context, name):
session.query(models.InstanceTypes).\
filter_by(id=instance_type_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.InstanceTypeExtraSpecs).\
filter_by(instance_type_id=instance_type_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -4105,7 +4108,7 @@ def instance_metadata_delete(context, instance_uuid, key):
_instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -4187,7 +4190,7 @@ def instance_system_metadata_delete(context, instance_uuid, key):
_instance_system_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -4278,7 +4281,7 @@ def agent_build_destroy(context, agent_build_id):
read_deleted="yes").\
filter_by(id=agent_build_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -4328,7 +4331,7 @@ def bw_usage_update(context,
bwusage.uuid = uuid
bwusage.mac = mac
bwusage.last_refreshed = utils.utcnow()
bwusage.last_refreshed = timeutils.utcnow()
bwusage.bw_in = bw_in
bwusage.bw_out = bw_out
bwusage.save(session=session)
@ -4363,7 +4366,7 @@ def instance_type_extra_specs_delete(context, instance_type_id, key):
context, instance_type_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -4492,12 +4495,12 @@ def volume_type_destroy(context, name):
session.query(models.VolumeTypes).\
filter_by(id=volume_type_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.VolumeTypeExtraSpecs).\
filter_by(volume_type_id=volume_type_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -4544,7 +4547,7 @@ def volume_type_extra_specs_delete(context, volume_type_id, key):
_volume_type_extra_specs_query(context, volume_type_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@ -4873,7 +4876,7 @@ def aggregate_delete(context, aggregate_id):
aggregate_id)
if query.first():
query.update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'operational_state': aggregate_states.DISMISSED,
'updated_at': literal_column('updated_at')})
else:
@ -4905,7 +4908,7 @@ def aggregate_metadata_delete(context, aggregate_id, key):
filter_by(key=key)
if query.first():
query.update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
else:
raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
@ -4981,7 +4984,7 @@ def aggregate_host_delete(context, aggregate_id, host):
aggregate_id).filter_by(host=host)
if query.first():
query.update({'deleted': True,
'deleted_at': utils.utcnow(),
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
else:
raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,

View File

@ -32,7 +32,7 @@ from nova.db.sqlalchemy.session import get_session
from nova import exception
from nova import flags
from nova import utils
from nova.openstack.common import timeutils
FLAGS = flags.FLAGS
@ -43,8 +43,8 @@ class NovaBase(object):
"""Base class for Nova Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
__table_initialized__ = False
created_at = Column(DateTime, default=utils.utcnow)
updated_at = Column(DateTime, onupdate=utils.utcnow)
created_at = Column(DateTime, default=timeutils.utcnow)
updated_at = Column(DateTime, onupdate=timeutils.utcnow)
deleted_at = Column(DateTime)
deleted = Column(Boolean, default=False)
metadata = None
@ -65,7 +65,7 @@ class NovaBase(object):
def delete(self, session=None):
"""Delete this object."""
self.deleted = True
self.deleted_at = utils.utcnow()
self.deleted_at = timeutils.utcnow()
self.save(session=session)
def __setitem__(self, key, value):

View File

@ -32,6 +32,7 @@ from nova import flags
from nova import log as logging
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import utils
@ -404,7 +405,7 @@ def _parse_glance_iso8601_timestamp(timestamp):
for iso_format in iso_formats:
try:
return utils.parse_strtime(timestamp, iso_format)
return timeutils.parse_strtime(timestamp, iso_format)
except ValueError:
pass

View File

@ -67,6 +67,7 @@ from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
import nova.policy
from nova import quota
from nova import rpc
@ -819,7 +820,7 @@ class NetworkManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _disassociate_stale_fixed_ips(self, context):
if self.timeout_fixed_ips:
now = utils.utcnow()
now = timeutils.utcnow()
timeout = FLAGS.fixed_ip_disassociate_timeout
time = now - datetime.timedelta(seconds=timeout)
num = self.db.fixed_ip_disassociate_all_by_timeout(context,
@ -1303,7 +1304,7 @@ class NetworkManager(manager.SchedulerDependentManager):
if fixed_ip['instance_id'] is None:
msg = _('IP %s leased that is not associated') % address
raise exception.NovaException(msg)
now = utils.utcnow()
now = timeutils.utcnow()
self.db.fixed_ip_update(context,
fixed_ip['address'],
{'leased': True,

View File

@ -28,6 +28,7 @@ from nova import network
from nova.network import model as network_model
from nova.notifier import api as notifier_api
from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import utils
LOG = log.getLogger(__name__)
@ -140,7 +141,7 @@ def audit_period_bounds(current_period=False):
begin, end = utils.last_completed_audit_period()
if current_period:
audit_start = end
audit_end = utils.utcnow()
audit_end = timeutils.utcnow()
else:
audit_start = begin
audit_end = end

View File

@ -21,7 +21,7 @@ from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import utils
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
@ -110,7 +110,7 @@ def notify(context, publisher_id, event_type, priority, payload):
{'message_id': str(uuid.uuid4()),
'publisher_id': 'compute.host1',
'timestamp': utils.utcnow(),
'timestamp': timeutils.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
@ -129,7 +129,7 @@ def notify(context, publisher_id, event_type, priority, payload):
event_type=event_type,
priority=priority,
payload=payload,
timestamp=str(utils.utcnow()))
timestamp=str(timeutils.utcnow()))
try:
driver.notify(context, msg)
except Exception, e:

View File

@ -0,0 +1,109 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import time
import iso8601
# Default serialization formats: second precision for ISO-8601-style output,
# microsecond precision ("perfect") for lossless strtime/parse_strtime
# round-tripping.
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
def isotime(at=None):
    """Stringify a datetime in ISO 8601 format.

    :param at: datetime to format; defaults to the (overridable) current
        UTC time.
    :returns: string such as ``'2012-06-06T10:32:49Z'``; naive datetimes
        are labeled UTC.
    """
    if not at:
        at = utcnow()
    # Renamed from `str`, which shadowed the builtin of the same name.
    stamp = at.strftime(TIME_FORMAT)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    # 'Z' is the ISO 8601 designator for UTC; other zones keep their name.
    stamp += ('Z' if tz == 'UTC' else tz)
    return stamp
def parse_isotime(timestr):
    """Parse time from ISO 8601 format.

    :param timestr: ISO 8601 formatted string.
    :returns: a timezone-aware datetime.
    :raises ValueError: if the string cannot be parsed.
    """
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, TypeError) as e:
        # Re-raise as ValueError so callers need not know about iso8601.
        # str(e) replaces the deprecated BaseException.message attribute
        # (removed in Python 3); both except branches did the same thing,
        # so they are merged.
        raise ValueError(str(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Return *at* (default: overridable utcnow) formatted per *fmt*.

    :param at: datetime to format; current UTC time when falsy.
    :param fmt: strftime format string; defaults to the microsecond-exact
        PERFECT_TIME_FORMAT so the value round-trips via parse_strtime().
    """
    when = at if at else utcnow()
    return when.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time string back into a datetime.

    Inverse of strtime() when called with the same *fmt*.
    """
    parsed = datetime.datetime.strptime(timestr, fmt)
    return parsed
def normalize_time(timestamp):
    """Normalize a time in an arbitrary timezone to naive UTC.

    A naive datetime (or one with a zero UTC offset) is returned
    unchanged; otherwise the offset is subtracted and tzinfo dropped.
    """
    offset = timestamp.utcoffset()
    # Falsy covers both naive datetimes (offset is None) and aware
    # datetimes already at UTC (offset == timedelta(0)).
    if not offset:
        return timestamp
    return timestamp.replace(tzinfo=None) - offset
def is_older_than(before, seconds):
    """Return True if *before* is more than *seconds* in the past.

    :param before: datetime to compare against the overridable utcnow().
    :param seconds: age threshold in seconds.
    """
    age = utcnow() - before
    return age > datetime.timedelta(seconds=seconds)
def utcnow_ts():
    """Return the (overridable) current UTC time as a POSIX timestamp."""
    now = utcnow()
    # timegm treats the struct_time as UTC, unlike time.mktime (local).
    return calendar.timegm(now.timetuple())
def utcnow():
    """Overridable version of utils.utcnow.

    Returns the frozen time installed by set_time_override() when one is
    active, otherwise the real datetime.datetime.utcnow().
    """
    override = utcnow.override_time
    if override:
        return override
    return datetime.datetime.utcnow()


# Function attribute holding the test-time override; None means "real time".
utcnow.override_time = None
def set_time_override(override_time=None):
    """Override utcnow() to return a constant time.

    :param override_time: datetime to freeze time at; when omitted, the
        current UTC time *at call time* is used.

    NOTE(review): the original signature used
    ``override_time=datetime.datetime.utcnow()``, a default evaluated
    once at import time — every later no-arg call froze time at module
    import rather than "now". The None sentinel defers the lookup to
    call time.
    """
    if override_time is None:
        override_time = utcnow()
    utcnow.override_time = override_time
def advance_time_delta(timedelta):
    """Advance the overridden time by a datetime.timedelta.

    Must only be called while an override installed by
    set_time_override() is active.
    """
    assert utcnow.override_time is not None
    utcnow.override_time += timedelta
def advance_time_seconds(seconds):
    """Advance the overridden time by the given number of seconds."""
    advance_time_delta(datetime.timedelta(seconds=seconds))
def clear_time_override():
    """Remove any time override, restoring real utcnow() behavior."""
    utcnow.override_time = None

View File

@ -26,7 +26,7 @@ from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova import utils
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
@ -318,7 +318,7 @@ class DbQuotaDriver(object):
if isinstance(expire, (int, long)):
expire = datetime.timedelta(seconds=expire)
if isinstance(expire, datetime.timedelta):
expire = utils.utcnow() + expire
expire = timeutils.utcnow() + expire
if not isinstance(expire, datetime.datetime):
raise exception.InvalidReservationExpiration(expire=expire)

View File

@ -33,6 +33,7 @@ from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import rpc
from nova import utils
@ -58,7 +59,7 @@ def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
if update_db:
volume_id = kwargs.get('volume_id', None)
if volume_id is not None:
now = utils.utcnow()
now = timeutils.utcnow()
db.volume_update(context, volume_id,
{'host': host, 'scheduled_at': now})
rpc.cast(context,
@ -75,7 +76,7 @@ def cast_to_compute_host(context, host, method, update_db=True, **kwargs):
instance_id = kwargs.get('instance_id', None)
instance_uuid = kwargs.get('instance_uuid', instance_id)
if instance_uuid is not None:
now = utils.utcnow()
now = timeutils.utcnow()
db.instance_update(context, instance_uuid,
{'host': host, 'scheduled_at': now})
rpc.cast(context,

View File

@ -25,8 +25,8 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova.scheduler import filters
from nova import utils
host_manager_opts = [
@ -274,7 +274,7 @@ class HostManager(object):
service_caps = self.service_states.get(host, {})
# Copy the capabilities, so we don't modify the original dict
capab_copy = dict(capabilities)
capab_copy["timestamp"] = utils.utcnow() # Reported time
capab_copy["timestamp"] = timeutils.utcnow() # Reported time
service_caps[service_name] = capab_copy
self.service_states[host] = service_caps
@ -282,7 +282,7 @@ class HostManager(object):
"""Check if host service capabilites are not recent enough."""
allowed_time_diff = FLAGS.periodic_interval * 3
caps = self.service_states[host][service]
if ((utils.utcnow() - caps["timestamp"]) <=
if ((timeutils.utcnow() - caps["timestamp"]) <=
datetime.timedelta(seconds=allowed_time_diff)):
return False
return True

View File

@ -29,7 +29,7 @@ import os
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.openstack.common import timeutils
scheduler_json_config_location_opt = cfg.StrOpt(
@ -81,7 +81,7 @@ class SchedulerOptions(object):
def _get_time_now(self):
"""Get current UTC. Broken out for testing."""
return utils.utcnow()
return timeutils.utcnow()
def get_configuration(self, filename=None):
"""Check the json file for changes and load it if needed."""

View File

@ -35,10 +35,10 @@ from nova import flags
import nova.image.fake
from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import service
from nova import tests
from nova.tests import fake_flags
from nova import utils
from nova.virt import fake
@ -131,7 +131,7 @@ class TestCase(unittest.TestCase):
# NOTE(vish): We need a better method for creating fixtures for tests
# now that we have some required db setup for the system
# to work properly.
self.start = utils.utcnow()
self.start = timeutils.utcnow()
tests.reset_db()
# emulate some of the mox stuff, we can't use the metaclass

View File

@ -25,8 +25,8 @@ from nova.api import ec2
from nova import context
from nova import exception
from nova import flags
from nova.openstack.common import timeutils
from nova import test
from nova import utils
FLAGS = flags.FLAGS
@ -43,11 +43,11 @@ class LockoutTestCase(test.TestCase):
"""Test case for the Lockout middleware."""
def setUp(self): # pylint: disable=C0103
super(LockoutTestCase, self).setUp()
utils.set_time_override()
timeutils.set_time_override()
self.lockout = ec2.Lockout(conditional_forbid)
def tearDown(self): # pylint: disable=C0103
utils.clear_time_override()
timeutils.clear_time_override()
super(LockoutTestCase, self).tearDown()
def _send_bad_attempts(self, access_key, num_attempts=1):
@ -68,21 +68,21 @@ class LockoutTestCase(test.TestCase):
def test_timeout(self):
self._send_bad_attempts('test', FLAGS.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
utils.advance_time_seconds(FLAGS.lockout_minutes * 60)
timeutils.advance_time_seconds(FLAGS.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test'))
def test_multiple_keys(self):
self._send_bad_attempts('test1', FLAGS.lockout_attempts)
self.assertTrue(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
utils.advance_time_seconds(FLAGS.lockout_minutes * 60)
timeutils.advance_time_seconds(FLAGS.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
def test_window_timeout(self):
self._send_bad_attempts('test', FLAGS.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
utils.advance_time_seconds(FLAGS.lockout_window * 60)
timeutils.advance_time_seconds(FLAGS.lockout_window * 60)
self._send_bad_attempts('test', FLAGS.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))

View File

@ -20,6 +20,7 @@ from nova.api.openstack import wsgi
from nova.compute import utils as compute_utils
from nova import db
from nova import flags
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
@ -30,9 +31,11 @@ FLAGS = flags.FLAGS
def fake_vpn_instance():
return {'id': 7, 'image_ref': FLAGS.vpn_image_id, 'vm_state': 'active',
'created_at': utils.parse_strtime('1981-10-20T00:00:00.000000'),
'uuid': 7777, 'project_id': 'other'}
return {
'id': 7, 'image_ref': FLAGS.vpn_image_id, 'vm_state': 'active',
'created_at': timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
'uuid': 7777, 'project_id': 'other',
}
def compute_api_get_all_empty(context):
@ -154,7 +157,7 @@ class CloudpipesXMLSerializerTest(test.TestCase):
public_ip='1.2.3.4',
public_port='321',
instance_id='1234-1234-1234-1234',
created_at=utils.isotime(utils.utcnow()),
created_at=timeutils.isotime(),
state='running')),
dict(cloudpipe=dict(
project_id='4321',

View File

@ -26,10 +26,10 @@ from nova import context
from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
FLAGS = flags.FLAGS
@ -41,7 +41,7 @@ ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
STOP = utils.utcnow()
STOP = timeutils.utcnow()
START = STOP - datetime.timedelta(hours=HOURS)
@ -226,7 +226,7 @@ class SimpleTenantUsageSerializerTest(test.TestCase):
def test_serializer_show(self):
serializer = simple_tenant_usage.SimpleTenantUsageTemplate()
today = utils.utcnow()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usage = dict(
tenant_id='tenant',
@ -272,7 +272,7 @@ class SimpleTenantUsageSerializerTest(test.TestCase):
def test_serializer_index(self):
serializer = simple_tenant_usage.SimpleTenantUsagesTemplate()
today = utils.utcnow()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usages = [dict(
tenant_id='tenant1',

View File

@ -22,9 +22,9 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
from nova import volume
FLAGS = flags.FLAGS
@ -249,7 +249,7 @@ class SnapshotSerializerTest(test.TestCase):
id='snap_id',
status='snap_status',
size=1024,
createdAt=utils.utcnow(),
createdAt=timeutils.utcnow(),
displayName='snap_name',
displayDescription='snap_desc',
volumeId='vol_id',
@ -267,7 +267,7 @@ class SnapshotSerializerTest(test.TestCase):
id='snap1_id',
status='snap1_status',
size=1024,
createdAt=utils.utcnow(),
createdAt=timeutils.utcnow(),
displayName='snap1_name',
displayDescription='snap1_desc',
volumeId='vol1_id',
@ -276,7 +276,7 @@ class SnapshotSerializerTest(test.TestCase):
id='snap2_id',
status='snap2_status',
size=1024,
createdAt=utils.utcnow(),
createdAt=timeutils.utcnow(),
displayName='snap2_name',
displayDescription='snap2_desc',
volumeId='vol2_id',

View File

@ -25,9 +25,9 @@ from nova import context
import nova.db
from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
from nova import volume
from webob import exc
@ -364,7 +364,7 @@ class VolumeSerializerTest(test.TestCase):
status='vol_status',
size=1024,
availabilityZone='vol_availability',
createdAt=utils.utcnow(),
createdAt=timeutils.utcnow(),
attachments=[dict(
id='vol_id',
volumeId='vol_id',
@ -393,7 +393,7 @@ class VolumeSerializerTest(test.TestCase):
status='vol1_status',
size=1024,
availabilityZone='vol1_availability',
createdAt=utils.utcnow(),
createdAt=timeutils.utcnow(),
attachments=[dict(
id='vol1_id',
volumeId='vol1_id',
@ -413,7 +413,7 @@ class VolumeSerializerTest(test.TestCase):
status='vol2_status',
size=1024,
availabilityZone='vol2_availability',
createdAt=utils.utcnow(),
createdAt=timeutils.utcnow(),
attachments=[dict(
id='vol2_id',
volumeId='vol2_id',

View File

@ -27,6 +27,7 @@ from nova import console
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
@ -108,9 +109,9 @@ def stub_instance(id, user_id='fake', project_id='fake', host=None,
"user_data": "",
"reservation_id": reservation_id,
"mac_address": "",
"scheduled_at": utils.utcnow(),
"launched_at": utils.utcnow(),
"terminated_at": utils.utcnow(),
"scheduled_at": timeutils.utcnow(),
"launched_at": timeutils.utcnow(),
"terminated_at": timeutils.utcnow(),
"availability_zone": "",
"display_name": server_name,
"display_description": "",

View File

@ -39,6 +39,7 @@ from nova.db.sqlalchemy import models
from nova import exception as exc
import nova.image.fake
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import quota
from nova.tests import fake_network
from nova.tests.glance import stubs as glance_stubs
@ -329,7 +330,7 @@ class FakeAuthDatabase(object):
@staticmethod
def auth_token_create(context, token):
fake_token = FakeToken(created_at=utils.utcnow(), **token)
fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@ -575,9 +576,9 @@ def stub_instance(id, user_id=None, project_id=None, host=None,
"user_data": "",
"reservation_id": reservation_id,
"mac_address": "",
"scheduled_at": utils.utcnow(),
"launched_at": utils.utcnow(),
"terminated_at": utils.utcnow(),
"scheduled_at": timeutils.utcnow(),
"launched_at": timeutils.utcnow(),
"terminated_at": timeutils.utcnow(),
"availability_zone": "",
"display_name": display_name or server_name,
"display_description": "",

View File

@ -20,9 +20,9 @@ from nova.api.openstack.volume import snapshots
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
from nova import volume
FLAGS = flags.FLAGS
@ -170,7 +170,7 @@ class SnapshotSerializerTest(test.TestCase):
id='snap_id',
status='snap_status',
size=1024,
created_at=utils.utcnow(),
created_at=timeutils.utcnow(),
display_name='snap_name',
display_description='snap_desc',
volume_id='vol_id',
@ -188,7 +188,7 @@ class SnapshotSerializerTest(test.TestCase):
id='snap1_id',
status='snap1_status',
size=1024,
created_at=utils.utcnow(),
created_at=timeutils.utcnow(),
display_name='snap1_name',
display_description='snap1_desc',
volume_id='vol1_id',
@ -197,7 +197,7 @@ class SnapshotSerializerTest(test.TestCase):
id='snap2_id',
status='snap2_status',
size=1024,
created_at=utils.utcnow(),
created_at=timeutils.utcnow(),
display_name='snap2_name',
display_description='snap2_desc',
volume_id='vol2_id',

View File

@ -20,9 +20,9 @@ import webob
from nova.api.openstack.volume import volumes
from nova import flags
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
from nova.volume import api as volume_api
@ -215,7 +215,7 @@ class VolumeSerializerTest(test.TestCase):
status='vol_status',
size=1024,
availability_zone='vol_availability',
created_at=utils.utcnow(),
created_at=timeutils.utcnow(),
attachments=[dict(
id='vol_id',
volume_id='vol_id',
@ -244,7 +244,7 @@ class VolumeSerializerTest(test.TestCase):
status='vol1_status',
size=1024,
availability_zone='vol1_availability',
created_at=utils.utcnow(),
created_at=timeutils.utcnow(),
attachments=[dict(
id='vol1_id',
volume_id='vol1_id',
@ -264,7 +264,7 @@ class VolumeSerializerTest(test.TestCase):
status='vol2_status',
size=1024,
availability_zone='vol2_availability',
created_at=utils.utcnow(),
created_at=timeutils.utcnow(),
attachments=[dict(
id='vol2_id',
volume_id='vol2_id',

View File

@ -44,6 +44,7 @@ from nova import log as logging
from nova.notifier import test_notifier
from nova.openstack.common import importutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
import nova.policy
from nova import quota
from nova import rpc
@ -203,7 +204,7 @@ class ComputeTestCase(BaseTestCase):
def tearDown(self):
super(ComputeTestCase, self).tearDown()
utils.clear_time_override()
timeutils.clear_time_override()
def test_wrap_instance_fault(self):
inst_uuid = "fake_uuid"
@ -382,12 +383,12 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
self.assertEqual(instance['launched_at'], None)
self.assertEqual(instance['deleted_at'], None)
launch = utils.utcnow()
launch = timeutils.utcnow()
self.compute.run_instance(self.context, instance['uuid'])
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assert_(instance['launched_at'] > launch)
self.assertEqual(instance['deleted_at'], None)
terminate = utils.utcnow()
terminate = timeutils.utcnow()
self.compute.terminate_instance(self.context, instance['uuid'])
context = self.context.elevated(read_deleted="only")
instance = db.instance_get_by_uuid(context, instance['uuid'])
@ -506,13 +507,13 @@ class ComputeTestCase(BaseTestCase):
"""Ensure instance can be rebuilt"""
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
utils.set_time_override(old_time)
timeutils.set_time_override(old_time)
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
image_ref = instance['image_ref']
self.compute.run_instance(self.context, instance_uuid)
utils.set_time_override(cur_time)
timeutils.set_time_override(cur_time)
self.compute.rebuild_instance(self.context, instance_uuid,
image_ref, image_ref)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
@ -884,12 +885,12 @@ class ComputeTestCase(BaseTestCase):
"""Ensure terminate_instance generates apropriate usage notification"""
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
utils.set_time_override(old_time)
timeutils.set_time_override(old_time)
inst_ref = self._create_fake_instance()
self.compute.run_instance(self.context, inst_ref['uuid'])
test_notifier.NOTIFICATIONS = []
utils.set_time_override(cur_time)
timeutils.set_time_override(cur_time)
self.compute.terminate_instance(self.context, inst_ref['uuid'])
self.assertEquals(len(test_notifier.NOTIFICATIONS), 4)
@ -1069,10 +1070,10 @@ class ComputeTestCase(BaseTestCase):
"""Ensure notifications on instance migrate/resize"""
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
utils.set_time_override(old_time)
timeutils.set_time_override(old_time)
inst_ref = self._create_fake_instance()
self.compute.run_instance(self.context, inst_ref['uuid'])
utils.set_time_override(cur_time)
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
@ -1123,7 +1124,7 @@ class ComputeTestCase(BaseTestCase):
"""Ensure notifications on instance migrate/resize"""
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
utils.set_time_override(old_time)
timeutils.set_time_override(old_time)
instance = self._create_fake_instance()
context = self.context.elevated()
old_type_id = instance_types.get_instance_type_by_name(
@ -1140,7 +1141,7 @@ class ComputeTestCase(BaseTestCase):
'pre-migrating')
self.compute.resize_instance(context, instance['uuid'],
migration_ref['id'], {})
utils.set_time_override(cur_time)
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
self.compute.finish_resize(context, instance['uuid'],
@ -1172,12 +1173,12 @@ class ComputeTestCase(BaseTestCase):
"""Ensure notifications on instance migrate/resize"""
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
utils.set_time_override(old_time)
timeutils.set_time_override(old_time)
instance = self._create_fake_instance()
context = self.context.elevated()
self.compute.run_instance(self.context, instance['uuid'])
utils.set_time_override(cur_time)
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
db.instance_update(self.context, instance['uuid'], {'host': 'foo'})
@ -1673,7 +1674,8 @@ class ComputeTestCase(BaseTestCase):
def test_cleanup_running_deleted_instances(self):
admin_context = context.get_admin_context()
deleted_at = utils.utcnow() - datetime.timedelta(hours=1, minutes=5)
deleted_at = (timeutils.utcnow() -
datetime.timedelta(hours=1, minutes=5))
instance = self._create_fake_instance({"deleted_at": deleted_at,
"deleted": True})
@ -1715,8 +1717,8 @@ class ComputeTestCase(BaseTestCase):
instance2.deleted = False
instance2.deleted_at = None
self.mox.StubOutWithMock(utils, 'is_older_than')
utils.is_older_than('sometimeago',
self.mox.StubOutWithMock(timeutils, 'is_older_than')
timeutils.is_older_than('sometimeago',
FLAGS.running_deleted_instance_timeout).AndReturn(True)
self.mox.StubOutWithMock(self.compute.db, "instance_get_all_by_host")
@ -1879,7 +1881,7 @@ class ComputeTestCase(BaseTestCase):
self.flags(instance_build_timeout=0)
ctxt = context.get_admin_context()
called = {'get_all': False, 'set_error_state': 0}
created_at = utils.utcnow() + datetime.timedelta(seconds=-60)
created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
def fake_instance_get_all_by_filters(*args, **kwargs):
called['get_all'] = True
@ -1911,7 +1913,7 @@ class ComputeTestCase(BaseTestCase):
self.flags(instance_build_timeout=30)
ctxt = context.get_admin_context()
called = {'get_all': False, 'set_error_state': 0}
created_at = utils.utcnow() + datetime.timedelta(seconds=-60)
created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
def fake_instance_get_all_by_filters(*args, **kwargs):
called['get_all'] = True
@ -1943,7 +1945,7 @@ class ComputeTestCase(BaseTestCase):
self.flags(instance_build_timeout=30)
ctxt = context.get_admin_context()
called = {'get_all': False, 'set_error_state': 0}
created_at = utils.utcnow() + datetime.timedelta(seconds=-60)
created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
def fake_instance_get_all_by_filters(*args, **kwargs):
called['get_all'] = True
@ -1970,9 +1972,12 @@ class ComputeTestCase(BaseTestCase):
#not expired
uuid = 'fake-uuid-5'
instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host,
'vm_state': vm_states.BUILDING,
'created_at': utils.utcnow()}
instance_map[uuid] = {
'uuid': uuid,
'host': FLAGS.host,
'vm_state': vm_states.BUILDING,
'created_at': timeutils.utcnow(),
}
instances.append(instance_map[uuid])
self.compute._check_instance_build_time(ctxt)

View File

@ -20,10 +20,10 @@ import datetime
from nova import db
from nova import exception
from nova.openstack.common import timeutils
from nova.scheduler import host_manager
from nova import test
from nova.tests.scheduler import fakes
from nova import utils
class ComputeFilterClass1(object):
@ -90,10 +90,10 @@ class HostManagerTestCase(test.TestCase):
def test_update_service_capabilities(self):
service_states = self.host_manager.service_states
self.assertDictMatch(service_states, {})
self.mox.StubOutWithMock(utils, 'utcnow')
utils.utcnow().AndReturn(31337)
utils.utcnow().AndReturn(31338)
utils.utcnow().AndReturn(31339)
self.mox.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().AndReturn(31337)
timeutils.utcnow().AndReturn(31338)
timeutils.utcnow().AndReturn(31339)
host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
timestamp=1)
@ -138,10 +138,10 @@ class HostManagerTestCase(test.TestCase):
self.host_manager.service_states = service_states
self.mox.StubOutWithMock(utils, 'utcnow')
utils.utcnow().AndReturn(datetime.datetime.fromtimestamp(3020))
utils.utcnow().AndReturn(datetime.datetime.fromtimestamp(3020))
utils.utcnow().AndReturn(datetime.datetime.fromtimestamp(3020))
self.mox.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().AndReturn(datetime.datetime.fromtimestamp(3020))
timeutils.utcnow().AndReturn(datetime.datetime.fromtimestamp(3020))
timeutils.utcnow().AndReturn(datetime.datetime.fromtimestamp(3020))
self.mox.ReplayAll()
res1 = self.host_manager.host_service_caps_stale('host1', 'compute')

View File

@ -28,6 +28,7 @@ from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import rpc
from nova.rpc import common as rpc_common
from nova.scheduler import driver
@ -983,12 +984,12 @@ class SchedulerDriverModuleTestCase(test.TestCase):
'extra_arg': 'meow'}
queue = 'fake_queue'
self.mox.StubOutWithMock(utils, 'utcnow')
self.mox.StubOutWithMock(timeutils, 'utcnow')
self.mox.StubOutWithMock(db, 'volume_update')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'cast')
utils.utcnow().AndReturn('fake-now')
timeutils.utcnow().AndReturn('fake-now')
db.volume_update(self.context, 31337,
{'host': host, 'scheduled_at': 'fake-now'})
rpc.queue_get_for(self.context, 'volume', host).AndReturn(queue)
@ -1043,12 +1044,12 @@ class SchedulerDriverModuleTestCase(test.TestCase):
'extra_arg': 'meow'}
queue = 'fake_queue'
self.mox.StubOutWithMock(utils, 'utcnow')
self.mox.StubOutWithMock(timeutils, 'utcnow')
self.mox.StubOutWithMock(db, 'instance_update')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'cast')
utils.utcnow().AndReturn('fake-now')
timeutils.utcnow().AndReturn('fake-now')
db.instance_update(self.context, 31337,
{'host': host, 'scheduled_at': 'fake-now'})
rpc.queue_get_for(self.context, 'compute', host).AndReturn(queue)

View File

@ -35,8 +35,8 @@ from nova import block_device
from nova import context
from nova import exception
from nova import flags
from nova.openstack.common import timeutils
from nova import test
from nova import utils
FLAGS = flags.FLAGS
@ -252,12 +252,12 @@ class ApiEc2TestCase(test.TestCase):
"""
conv = apirequest._database_to_isoformat
# sqlite database representation with microseconds
time_to_convert = utils.parse_strtime("2011-02-21 20:14:10.634276",
"%Y-%m-%d %H:%M:%S.%f")
time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
"%Y-%m-%d %H:%M:%S.%f")
self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
# mysqlite database representation
time_to_convert = utils.parse_strtime("2011-02-21 19:56:18",
"%Y-%m-%d %H:%M:%S")
time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
"%Y-%m-%d %H:%M:%S")
self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
def test_xmlns_version_matches_request_version(self):

View File

@ -24,6 +24,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import timeutils
from nova import test
from nova import utils
@ -96,7 +97,7 @@ class DbApiTestCase(test.TestCase):
db.migration_update(ctxt, migration.id, {"status": "CONFIRMED"})
# Ensure the new migration is not returned.
updated_at = utils.utcnow()
updated_at = timeutils.utcnow()
values = {"status": "finished", "updated_at": updated_at}
migration = db.migration_create(ctxt, values)
results = db.migration_get_all_unconfirmed(ctxt, 10)
@ -120,7 +121,7 @@ class DbApiTestCase(test.TestCase):
db.instance_update(ctxt, instance['uuid'], {"task_state": None})
# Ensure the newly rebooted instance is not returned.
updated_at = utils.utcnow()
updated_at = timeutils.utcnow()
values = {"task_state": "rebooting", "updated_at": updated_at}
instance = db.instance_create(ctxt, values)
results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
@ -383,7 +384,7 @@ class DbApiTestCase(test.TestCase):
db.fixed_ip_create(ctxt, values)
def test_fixed_ip_disassociate_all_by_timeout_single_host(self):
now = utils.utcnow()
now = timeutils.utcnow()
ctxt = context.get_admin_context()
self._timeout_test(ctxt, now, False)
result = db.fixed_ip_disassociate_all_by_timeout(ctxt, 'foo', now)
@ -392,7 +393,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(result, 1)
def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
now = utils.utcnow()
now = timeutils.utcnow()
ctxt = context.get_admin_context()
self._timeout_test(ctxt, now, True)
result = db.fixed_ip_disassociate_all_by_timeout(ctxt, 'foo', now)

View File

@ -26,11 +26,11 @@ from nova.db.sqlalchemy import api as sqa_api
from nova.db.sqlalchemy import models as sqa_models
from nova import exception
from nova import flags
from nova.openstack.common import timeutils
from nova import quota
from nova import rpc
from nova.scheduler import driver as scheduler_driver
from nova import test
from nova import utils
from nova import volume
@ -706,10 +706,10 @@ class DbQuotaDriverTestCase(test.TestCase):
self.calls = []
utils.set_time_override()
timeutils.set_time_override()
def tearDown(self):
utils.clear_time_override()
timeutils.clear_time_override()
super(DbQuotaDriverTestCase, self).tearDown()
def test_get_defaults(self):
@ -1273,7 +1273,7 @@ class DbQuotaDriverTestCase(test.TestCase):
quota.QUOTAS._resources,
dict(instances=2))
expire = utils.utcnow() + datetime.timedelta(seconds=86400)
expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
@ -1287,7 +1287,7 @@ class DbQuotaDriverTestCase(test.TestCase):
quota.QUOTAS._resources,
dict(instances=2), expire=3600)
expire = utils.utcnow() + datetime.timedelta(seconds=3600)
expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
@ -1302,7 +1302,7 @@ class DbQuotaDriverTestCase(test.TestCase):
quota.QUOTAS._resources,
dict(instances=2), expire=expire_delta)
expire = utils.utcnow() + expire_delta
expire = timeutils.utcnow() + expire_delta
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
@ -1312,7 +1312,7 @@ class DbQuotaDriverTestCase(test.TestCase):
def test_reserve_datetime_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire = utils.utcnow() + datetime.timedelta(seconds=120)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
@ -1327,7 +1327,7 @@ class DbQuotaDriverTestCase(test.TestCase):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(until_refresh=500)
expire = utils.utcnow() + datetime.timedelta(seconds=120)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
@ -1342,7 +1342,7 @@ class DbQuotaDriverTestCase(test.TestCase):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(max_age=86400)
expire = utils.utcnow() + datetime.timedelta(seconds=120)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
@ -1396,7 +1396,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
res = quota.ReservableResource(res_name, make_sync(res_name))
self.resources[res_name] = res
self.expire = utils.utcnow() + datetime.timedelta(seconds=3600)
self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.usages = {}
self.usages_created = {}
@ -1413,7 +1413,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
save=True):
quota_usage_ref = self._make_quota_usage(
project_id, resource, in_use, reserved, until_refresh,
utils.utcnow(), utils.utcnow())
timeutils.utcnow(), timeutils.utcnow())
self.usages_created[resource] = quota_usage_ref
@ -1423,7 +1423,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
resource, delta, expire, session=None):
reservation_ref = self._make_reservation(
uuid, usage_id, project_id, resource, delta, expire,
utils.utcnow(), utils.utcnow())
timeutils.utcnow(), timeutils.utcnow())
self.reservations_created[resource] = reservation_ref
@ -1434,7 +1434,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.stubs.Set(sqa_api, 'quota_usage_create', fake_quota_usage_create)
self.stubs.Set(sqa_api, 'reservation_create', fake_reservation_create)
utils.set_time_override()
timeutils.set_time_override()
def _make_quota_usage(self, project_id, resource, in_use, reserved,
until_refresh, created_at, updated_at):
@ -1455,9 +1455,9 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
def init_usage(self, project_id, resource, in_use, reserved,
until_refresh=None, created_at=None, updated_at=None):
if created_at is None:
created_at = utils.utcnow()
created_at = timeutils.utcnow()
if updated_at is None:
updated_at = utils.utcnow()
updated_at = timeutils.utcnow()
quota_usage_ref = self._make_quota_usage(project_id, resource, in_use,
reserved, until_refresh,
@ -1659,7 +1659,8 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
def test_quota_reserve_max_age(self):
max_age = 3600
record_created = utils.utcnow() - datetime.timedelta(seconds=max_age)
record_created = (timeutils.utcnow() -
datetime.timedelta(seconds=max_age))
self.init_usage('test_project', 'instances', 3, 0,
created_at=record_created, updated_at=record_created)
self.init_usage('test_project', 'cores', 3, 0,

View File

@ -32,6 +32,7 @@ import mox
import nova
from nova import exception
from nova import flags
from nova.openstack.common import timeutils
from nova import test
from nova import utils
@ -422,10 +423,10 @@ class GenericUtilsTestCase(test.TestCase):
down_time = 5
self.flags(service_down_time=down_time)
self.mox.StubOutWithMock(utils, 'utcnow')
self.mox.StubOutWithMock(timeutils, 'utcnow')
# Up (equal)
utils.utcnow().AndReturn(fts_func(fake_now))
timeutils.utcnow().AndReturn(fts_func(fake_now))
service = {'updated_at': fts_func(fake_now - down_time),
'created_at': fts_func(fake_now - down_time)}
self.mox.ReplayAll()
@ -434,7 +435,7 @@ class GenericUtilsTestCase(test.TestCase):
self.mox.ResetAll()
# Up
utils.utcnow().AndReturn(fts_func(fake_now))
timeutils.utcnow().AndReturn(fts_func(fake_now))
service = {'updated_at': fts_func(fake_now - down_time + 1),
'created_at': fts_func(fake_now - down_time + 1)}
self.mox.ReplayAll()
@ -443,7 +444,7 @@ class GenericUtilsTestCase(test.TestCase):
self.mox.ResetAll()
# Down
utils.utcnow().AndReturn(fts_func(fake_now))
timeutils.utcnow().AndReturn(fts_func(fake_now))
service = {'updated_at': fts_func(fake_now - down_time - 1),
'created_at': fts_func(fake_now - down_time - 1)}
self.mox.ReplayAll()
@ -532,111 +533,6 @@ class MonkeyPatchTestCase(test.TestCase):
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
class Iso8601TimeTest(test.TestCase):
def _instaneous(self, timestamp, yr, mon, day, hr, min, sec, micro):
self.assertEquals(timestamp.year, yr)
self.assertEquals(timestamp.month, mon)
self.assertEquals(timestamp.day, day)
self.assertEquals(timestamp.hour, hr)
self.assertEquals(timestamp.minute, min)
self.assertEquals(timestamp.second, sec)
self.assertEquals(timestamp.microsecond, micro)
def _do_test(self, str, yr, mon, day, hr, min, sec, micro, shift):
DAY_SECONDS = 24 * 60 * 60
timestamp = utils.parse_isotime(str)
self._instaneous(timestamp, yr, mon, day, hr, min, sec, micro)
offset = timestamp.tzinfo.utcoffset(None)
self.assertEqual(offset.seconds + offset.days * DAY_SECONDS, shift)
def test_zulu(self):
str = '2012-02-14T20:53:07Z'
self._do_test(str, 2012, 02, 14, 20, 53, 7, 0, 0)
def test_zulu_micros(self):
str = '2012-02-14T20:53:07.123Z'
self._do_test(str, 2012, 02, 14, 20, 53, 7, 123000, 0)
def test_offset_east(self):
str = '2012-02-14T20:53:07+04:30'
offset = 4.5 * 60 * 60
self._do_test(str, 2012, 02, 14, 20, 53, 7, 0, offset)
def test_offset_east_micros(self):
str = '2012-02-14T20:53:07.42+04:30'
offset = 4.5 * 60 * 60
self._do_test(str, 2012, 02, 14, 20, 53, 7, 420000, offset)
def test_offset_west(self):
str = '2012-02-14T20:53:07-05:30'
offset = -5.5 * 60 * 60
self._do_test(str, 2012, 02, 14, 20, 53, 7, 0, offset)
def test_offset_west_micros(self):
str = '2012-02-14T20:53:07.654321-05:30'
offset = -5.5 * 60 * 60
self._do_test(str, 2012, 02, 14, 20, 53, 7, 654321, offset)
def test_compare(self):
zulu = utils.parse_isotime('2012-02-14T20:53:07')
east = utils.parse_isotime('2012-02-14T20:53:07-01:00')
west = utils.parse_isotime('2012-02-14T20:53:07+01:00')
self.assertTrue(east > west)
self.assertTrue(east > zulu)
self.assertTrue(zulu > west)
def test_compare_micros(self):
zulu = utils.parse_isotime('2012-02-14T20:53:07.6544')
east = utils.parse_isotime('2012-02-14T19:53:07.654321-01:00')
west = utils.parse_isotime('2012-02-14T21:53:07.655+01:00')
self.assertTrue(east < west)
self.assertTrue(east < zulu)
self.assertTrue(zulu < west)
def test_zulu_roundtrip(self):
str = '2012-02-14T20:53:07Z'
zulu = utils.parse_isotime(str)
self.assertEquals(zulu.tzinfo, iso8601.iso8601.UTC)
self.assertEquals(utils.isotime(zulu), str)
def test_east_roundtrip(self):
str = '2012-02-14T20:53:07-07:00'
east = utils.parse_isotime(str)
self.assertEquals(east.tzinfo.tzname(None), '-07:00')
self.assertEquals(utils.isotime(east), str)
def test_west_roundtrip(self):
str = '2012-02-14T20:53:07+11:30'
west = utils.parse_isotime(str)
self.assertEquals(west.tzinfo.tzname(None), '+11:30')
self.assertEquals(utils.isotime(west), str)
def test_now_roundtrip(self):
str = utils.isotime()
now = utils.parse_isotime(str)
self.assertEquals(now.tzinfo, iso8601.iso8601.UTC)
self.assertEquals(utils.isotime(now), str)
def test_zulu_normalize(self):
str = '2012-02-14T20:53:07Z'
zulu = utils.parse_isotime(str)
normed = utils.normalize_time(zulu)
self._instaneous(normed, 2012, 2, 14, 20, 53, 07, 0)
def test_east_normalize(self):
str = '2012-02-14T20:53:07-07:00'
east = utils.parse_isotime(str)
normed = utils.normalize_time(east)
self._instaneous(normed, 2012, 2, 15, 03, 53, 07, 0)
def test_west_normalize(self):
str = '2012-02-14T20:53:07+21:00'
west = utils.parse_isotime(str)
normed = utils.normalize_time(west)
self._instaneous(normed, 2012, 2, 13, 23, 53, 07, 0)
class TestGreenLocks(test.TestCase):
def test_concurrent_green_lock_succeeds(self):
"""Verify spawn_n greenthreads with two locks run concurrently.
@ -837,10 +733,10 @@ class AuditPeriodTest(test.TestCase):
day=5,
month=3,
year=2012)
utils.set_time_override(override_time=self.test_time)
timeutils.set_time_override(override_time=self.test_time)
def tearDown(self):
utils.clear_time_override()
timeutils.clear_time_override()
super(AuditPeriodTest, self).tearDown()
def test_hour(self):

View File

@ -31,13 +31,13 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import importutils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_network
from nova.tests import fake_utils
from nova.tests.glance import stubs as glance_stubs
from nova.tests.xenapi import stubs
from nova import utils
from nova.virt.xenapi import connection as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import vm_utils
@ -1316,7 +1316,8 @@ class XenAPIBWUsageTestCase(test.TestCase):
self.name = "instance-0001"
self.uuid = "1-2-3-4-5"
result = self.conn.get_all_bw_usage([testinstance()], utils.utcnow())
result = self.conn.get_all_bw_usage([testinstance()],
timeutils.utcnow())
self.assertEqual(result, [])

View File

@ -46,7 +46,6 @@ from eventlet import event
from eventlet.green import subprocess
from eventlet import greenthread
from eventlet import semaphore
import iso8601
import lockfile
import netaddr
@ -56,11 +55,10 @@ from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
FLAGS = flags.FLAGS
FLAGS.register_opt(
@ -331,7 +329,7 @@ def last_completed_audit_period(unit=None):
unit, offset = unit.split("@", 1)
offset = int(offset)
rightnow = utcnow()
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
@ -446,83 +444,6 @@ def get_my_linklocal(interface):
raise exception.NovaException(msg)
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
return utcnow.override_time
return datetime.datetime.utcnow()
utcnow.override_time = None
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
return utcnow() - before > datetime.timedelta(seconds=seconds)
def utcnow_ts():
"""Timestamp version of our utcnow function."""
return time.mktime(utcnow().timetuple())
def set_time_override(override_time=utcnow()):
"""Override utils.utcnow to return a constant time."""
utcnow.override_time = override_time
def advance_time_delta(timedelta):
"""Advance overriden time using a datetime.timedelta."""
assert(not utcnow.override_time is None)
utcnow.override_time += timedelta
def advance_time_seconds(seconds):
"""Advance overriden time by seconds."""
advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
"""Returns formatted utcnow."""
if not at:
at = utcnow()
return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
"""Turn a formatted time back into a datetime."""
return datetime.datetime.strptime(timestr, fmt)
def isotime(at=None):
"""Stringify time in ISO 8601 format"""
if not at:
at = utcnow()
str = at.strftime(ISO_TIME_FORMAT)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
str += ('Z' if tz == 'UTC' else tz)
return str
def parse_isotime(timestr):
"""Turn an iso formatted time back into a datetime."""
try:
return iso8601.parse_date(timestr)
except (iso8601.ParseError, TypeError) as e:
raise ValueError(e.message)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC"""
offset = timestamp.utcoffset()
return timestamp.replace(tzinfo=None) - offset if offset else timestamp
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
@ -1241,7 +1162,7 @@ def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = total_seconds(utcnow() - last_heartbeat)
elapsed = total_seconds(timeutils.utcnow() - last_heartbeat)
return abs(elapsed) <= FLAGS.service_down_time

View File

@ -60,7 +60,7 @@ import pprint
from nova import exception
from nova import log as logging
from nova.openstack.common import jsonutils
from nova import utils
from nova.openstack.common import timeutils
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
@ -739,7 +739,7 @@ class SessionBase(object):
except Failure, exc:
task['error_info'] = exc.details
task['status'] = 'failed'
task['finished'] = utils.utcnow()
task['finished'] = timeutils.utcnow()
return task_ref
def _check_session(self, params):

View File

@ -39,6 +39,7 @@ from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import driver
from nova.virt.xenapi import firewall
@ -1202,10 +1203,10 @@ class VMOps(object):
task_refs = self._session.call_xenapi("task.get_by_name_label", task)
for task_ref in task_refs:
task_rec = self._session.call_xenapi("task.get_record", task_ref)
task_created = utils.parse_strtime(task_rec["created"].value,
"%Y%m%dT%H:%M:%SZ")
task_created = timeutils.parse_strtime(task_rec["created"].value,
"%Y%m%dT%H:%M:%SZ")
if utils.is_older_than(task_created, timeout):
if timeutils.is_older_than(task_created, timeout):
self._session.call_xenapi("task.cancel", task_ref)
def poll_rebooting_instances(self, timeout):
@ -1242,15 +1243,15 @@ class VMOps(object):
last_ran = self.poll_rescue_last_ran
if not last_ran:
# We need a base time to start tracking.
self.poll_rescue_last_ran = utils.utcnow()
self.poll_rescue_last_ran = timeutils.utcnow()
return
if not utils.is_older_than(last_ran, timeout):
if not timeutils.is_older_than(last_ran, timeout):
# Do not run. Let's bail.
return
# Update the time tracker and proceed.
self.poll_rescue_last_ran = utils.utcnow()
self.poll_rescue_last_ran = timeutils.utcnow()
rescue_vms = []
for instance in self.list_instances():

View File

@ -26,10 +26,10 @@ from nova.db import base
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import timeutils
import nova.policy
from nova import quota
from nova import rpc
from nova import utils
FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
@ -136,7 +136,7 @@ class API(base.Base):
msg = _("Volume still has %d dependent snapshots") % len(snapshots)
raise exception.InvalidVolume(reason=msg)
now = utils.utcnow()
now = timeutils.utcnow()
self.db.volume_update(context, volume_id, {'status': 'deleting',
'terminated_at': now})
host = volume['host']

View File

@ -46,6 +46,7 @@ from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import timeutils
from nova import quota
from nova import utils
from nova.volume import utils as volume_utils
@ -149,7 +150,7 @@ class VolumeManager(manager.SchedulerDependentManager):
self.db.volume_update(context,
volume_ref['id'], {'status': 'error'})
now = utils.utcnow()
now = timeutils.utcnow()
volume_ref = self.db.volume_update(context,
volume_ref['id'], {'status': 'available',
'launched_at': now})

View File

@ -19,6 +19,7 @@
from nova import flags
from nova import log as logging
from nova.notifier import api as notifier_api
from nova.openstack.common import timeutils
from nova import utils
@ -35,7 +36,7 @@ def notify_usage_exists(context, volume_ref, current_period=False):
begin, end = utils.last_completed_audit_period()
if current_period:
audit_start = end
audit_end = utils.utcnow()
audit_end = timeutils.utcnow()
else:
audit_start = begin
audit_end = end

View File

@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
modules=cfg,excutils,local,importutils,iniparser,jsonutils,setup,policy
modules=cfg,excutils,importutils,iniparser,jsonutils,local,policy,setup,timeutils
# The base module to hold the copy of openstack.common
base=nova

View File

@ -25,7 +25,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import utils
from nova.openstack.common import timeutils
from nova.virt.xenapi import connection as xenapi_conn
@ -90,7 +90,7 @@ def find_orphaned_instances(session, verbose=False):
# NOTE(jk0): A zombie VM is an instance that is not active and hasn't
# been updated in over the specified period.
is_zombie_vm = (instance.vm_state != "active"
and utils.is_older_than(instance.updated_at,
and timeutils.is_older_than(instance.updated_at,
FLAGS.zombie_instance_updated_at_window))
if is_active_and_deleting or is_zombie_vm: