Enable N302: Import modules only

Fix all N302 issues, and re-enable.

Change-Id: Ic94d144c915b228b7ff2fd9c5951875e159ffcdd

parent f9012e39ee
commit 8f394ee716
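N302 is the hacking check that enforces OpenStack's "import only modules" rule. Every hunk below applies the same mechanical pattern: drop the from-import of a name, import its module instead, and qualify each use of the name. As an illustrative sketch using only the Python standard library (not code from this change):

    # Flagged by N302: pulls a single name out of a module.
    from os.path import join
    print(join('/tmp', 'nova'))

    # N302-clean: import the module and qualify the name at the call site.
    # The origin of each name stays visible, and monkey-patching the module
    # attribute in tests affects every caller.
    from os import path
    print(path.join('/tmp', 'nova'))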
@@ -3,29 +3,29 @@ Created on 2010/12/20
 @author: Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
 '''
-import boto.ec2.instance
-from boto.resultset import ResultSet
+from boto.ec2 import instance
+from boto import resultset


-class ReservationV6(boto.ec2.instance.Reservation):
+class ReservationV6(instance.Reservation):
     def startElement(self, name, attrs, connection):
         if name == 'instancesSet':
-            self.instances = ResultSet([('item', InstanceV6)])
+            self.instances = resultset.ResultSet([('item', InstanceV6)])
             return self.instances
         elif name == 'groupSet':
-            self.groups = ResultSet([('item', boto.ec2.instance.Group)])
+            self.groups = resultset.ResultSet([('item', instance.Group)])
             return self.groups
         else:
            return None


-class InstanceV6(boto.ec2.instance.Instance):
+class InstanceV6(instance.Instance):
     def __init__(self, connection=None):
-        boto.ec2.instance.Instance.__init__(self, connection)
+        instance.Instance.__init__(self, connection)
         self.dns_name_v6 = None

     def endElement(self, name, value, connection):
-        boto.ec2.instance.Instance.endElement(self, name, value, connection)
+        instance.Instance.endElement(self, name, value, connection)
         if name == 'dnsNameV6':
             self.dns_name_v6 = value

@@ -23,7 +23,7 @@ import sys
 import telnetlib
 import tempfile

-from coverage import coverage
+import coverage
 from webob import exc

 from nova.api.openstack import extensions
@@ -47,7 +47,7 @@ class CoverageController(object):
     def __init__(self):
         self.data_path = tempfile.mkdtemp(prefix='nova-coverage_')
         data_out = os.path.join(self.data_path, '.nova-coverage')
-        self.coverInst = coverage(data_file=data_out)
+        self.coverInst = coverage.coverage(data_file=data_out)
         self.compute_api = compute_api.API()
         self.network_api = network_api.API()
         self.conductor_api = conductor_api.API()
@@ -35,7 +35,7 @@ from sqlalchemy.sql.expression import desc
 from sqlalchemy.sql import func

 from nova import block_device
-from nova.common.sqlalchemyutils import paginate_query
+from nova.common import sqlalchemyutils
 from nova.compute import task_states
 from nova.compute import vm_states
 from nova import db
@@ -1645,7 +1645,8 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
             marker = _instance_get_by_uuid(context, marker, session=session)
         except exception.InstanceNotFound:
             raise exception.MarkerNotFound(marker)
-    query_prefix = paginate_query(query_prefix, models.Instance, limit,
+    query_prefix = sqlalchemyutils.paginate_query(query_prefix,
+                           models.Instance, limit,
                            [sort_key, 'created_at', 'id'],
                            marker=marker,
                            sort_dir=sort_dir)
@@ -27,7 +27,7 @@ from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
 from sqlalchemy.orm import relationship, backref, object_mapper

 from nova.db.sqlalchemy.session import get_session
-from nova.db.sqlalchemy.types import IPAddress
+from nova.db.sqlalchemy import types
 from nova.openstack.common import cfg
 from nova.openstack.common import timeutils

@@ -291,8 +291,8 @@ class Instance(BASE, NovaBase):

     # User editable field meant to represent what ip should be used
     # to connect to the instance
-    access_ip_v4 = Column(IPAddress())
-    access_ip_v6 = Column(IPAddress())
+    access_ip_v4 = Column(types.IPAddress())
+    access_ip_v6 = Column(types.IPAddress())

     auto_disk_config = Column(Boolean())
     progress = Column(Integer)
@@ -595,7 +595,7 @@ class SecurityGroupIngressRule(BASE, NovaBase):
     protocol = Column(String(5))  # "tcp", "udp", or "icmp"
     from_port = Column(Integer)
     to_port = Column(Integer)
-    cidr = Column(IPAddress())
+    cidr = Column(types.IPAddress())

     # Note: This is not the parent SecurityGroup. It's SecurityGroup we're
     # granting access for.
@@ -615,7 +615,7 @@ class ProviderFirewallRule(BASE, NovaBase):
     protocol = Column(String(5))  # "tcp", "udp", or "icmp"
     from_port = Column(Integer)
     to_port = Column(Integer)
-    cidr = Column(IPAddress())
+    cidr = Column(types.IPAddress())


 class KeyPair(BASE, NovaBase):
@@ -665,25 +665,25 @@ class Network(BASE, NovaBase):
     label = Column(String(255))

     injected = Column(Boolean, default=False)
-    cidr = Column(IPAddress(), unique=True)
-    cidr_v6 = Column(IPAddress(), unique=True)
+    cidr = Column(types.IPAddress(), unique=True)
+    cidr_v6 = Column(types.IPAddress(), unique=True)
     multi_host = Column(Boolean, default=False)

-    gateway_v6 = Column(IPAddress())
-    netmask_v6 = Column(IPAddress())
-    netmask = Column(IPAddress())
+    gateway_v6 = Column(types.IPAddress())
+    netmask_v6 = Column(types.IPAddress())
+    netmask = Column(types.IPAddress())
     bridge = Column(String(255))
     bridge_interface = Column(String(255))
-    gateway = Column(IPAddress())
-    broadcast = Column(IPAddress())
-    dns1 = Column(IPAddress())
-    dns2 = Column(IPAddress())
+    gateway = Column(types.IPAddress())
+    broadcast = Column(types.IPAddress())
+    dns1 = Column(types.IPAddress())
+    dns2 = Column(types.IPAddress())

     vlan = Column(Integer)
-    vpn_public_address = Column(IPAddress())
+    vpn_public_address = Column(types.IPAddress())
     vpn_public_port = Column(Integer)
-    vpn_private_address = Column(IPAddress())
-    dhcp_start = Column(IPAddress())
+    vpn_private_address = Column(types.IPAddress())
+    dhcp_start = Column(types.IPAddress())

     rxtx_base = Column(Integer)

@@ -708,7 +708,7 @@ class FixedIp(BASE, NovaBase):
     """Represents a fixed ip for an instance."""
     __tablename__ = 'fixed_ips'
     id = Column(Integer, primary_key=True)
-    address = Column(IPAddress())
+    address = Column(types.IPAddress())
     network_id = Column(Integer, nullable=True)
     virtual_interface_id = Column(Integer, nullable=True)
     instance_uuid = Column(String(36), nullable=True)
@@ -725,7 +725,7 @@ class FloatingIp(BASE, NovaBase):
     """Represents a floating ip that dynamically forwards to a fixed ip."""
     __tablename__ = 'floating_ips'
     id = Column(Integer, primary_key=True)
-    address = Column(IPAddress())
+    address = Column(types.IPAddress())
     fixed_ip_id = Column(Integer, nullable=True)
     project_id = Column(String(255))
     host = Column(String(255))  # , ForeignKey('hosts.id'))
@@ -748,7 +748,7 @@ class ConsolePool(BASE, NovaBase):
     """Represents pool of consoles on the same physical node."""
     __tablename__ = 'console_pools'
     id = Column(Integer, primary_key=True)
-    address = Column(IPAddress())
+    address = Column(types.IPAddress())
     username = Column(String(255))
     password = Column(String(255))
     console_type = Column(String(255))
@@ -19,4 +19,6 @@ The membership service for Nova. Different implementations can be plugged
 according to the Nova configuration.
 """

-from nova.servicegroup.api import API
+from nova.servicegroup import api
+
+API = api.API
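This __init__ hunk, like the nova.virt driver packages further down, keeps the package-level name importable by re-binding it from the module (API = api.API), so callers that reference nova.servicegroup.API keep working while the import itself satisfies N302. A minimal sketch of that re-export pattern, with made-up package and class names:

    # mypkg/impl.py
    class Driver(object):
        pass

    # mypkg/__init__.py -- N302-clean re-export of the class.
    from mypkg import impl

    Driver = impl.Driver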
@@ -16,7 +16,7 @@

 import telnetlib

-from coverage import coverage
+import coverage
 import webob

 from nova.api.openstack.compute.contrib import coverage_ext
@@ -48,8 +48,8 @@ class CoverageExtensionTest(test.TestCase):
         super(CoverageExtensionTest, self).setUp()
         self.stubs.Set(telnetlib.Telnet, 'write', fake_telnet)
         self.stubs.Set(telnetlib.Telnet, 'expect', fake_telnet)
-        self.stubs.Set(coverage, 'report', fake_report)
-        self.stubs.Set(coverage, 'xml_report', fake_xml_report)
+        self.stubs.Set(coverage.coverage, 'report', fake_report)
+        self.stubs.Set(coverage.coverage, 'xml_report', fake_xml_report)
         self.admin_context = context.RequestContext('fakeadmin_0',
                                                     'fake',
                                                     is_admin=True)
@@ -22,10 +22,7 @@
 import os

 import mox
-from testtools.matchers import Contains
-from testtools.matchers import MatchesAll
-from testtools.matchers import Not
-from testtools.matchers import StartsWith
+from testtools import matchers

 from nova import exception
 from nova.openstack.common import cfg
@@ -120,26 +117,26 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
             'ari_path': 'ggg',
         }
         config = pxe.build_pxe_config(**args)
-        self.assertThat(config, StartsWith('default deploy'))
+        self.assertThat(config, matchers.StartsWith('default deploy'))

         # deploy bits are in the deploy section
         start = config.index('label deploy')
         end = config.index('label boot')
-        self.assertThat(config[start:end], MatchesAll(
-            Contains('kernel ddd'),
-            Contains('initrd=eee'),
-            Contains('deployment_id=aaa'),
-            Contains('deployment_key=bbb'),
-            Contains('iscsi_target_iqn=ccc'),
-            Not(Contains('kernel fff')),
+        self.assertThat(config[start:end], matchers.MatchesAll(
+            matchers.Contains('kernel ddd'),
+            matchers.Contains('initrd=eee'),
+            matchers.Contains('deployment_id=aaa'),
+            matchers.Contains('deployment_key=bbb'),
+            matchers.Contains('iscsi_target_iqn=ccc'),
+            matchers.Not(matchers.Contains('kernel fff')),
         ))

         # boot bits are in the boot section
         start = config.index('label boot')
-        self.assertThat(config[start:], MatchesAll(
-            Contains('kernel fff'),
-            Contains('initrd=ggg'),
-            Not(Contains('kernel ddd')),
+        self.assertThat(config[start:], matchers.MatchesAll(
+            matchers.Contains('kernel fff'),
+            matchers.Contains('initrd=ggg'),
+            matchers.Not(matchers.Contains('kernel ddd')),
         ))

     def test_build_network_config(self):
@@ -58,7 +58,7 @@ import nova.policy
 from nova import quota
 from nova import test
 from nova.tests.compute import fake_resource_tracker
-from nova.tests.db.fakes import FakeModel
+from nova.tests.db import fakes as db_fakes
 from nova.tests import fake_network
 from nova.tests.image import fake as fake_image
 from nova.tests import matchers
@@ -5879,11 +5879,11 @@ class ComputeAPITestCase(BaseTestCase):
         instance = self._create_fake_instance()

         def rule_get(*args, **kwargs):
-            mock_rule = FakeModel({'parent_group_id': 1})
+            mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
             return [mock_rule]

         def group_get(*args, **kwargs):
-            mock_group = FakeModel({'instances': [instance]})
+            mock_group = db_fakes.FakeModel({'instances': [instance]})
             return mock_group

         self.stubs.Set(
@@ -5908,11 +5908,11 @@ class ComputeAPITestCase(BaseTestCase):
         instance = self._create_fake_instance()

         def rule_get(*args, **kwargs):
-            mock_rule = FakeModel({'parent_group_id': 1})
+            mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
             return [mock_rule]

         def group_get(*args, **kwargs):
-            mock_group = FakeModel({'instances': [instance]})
+            mock_group = db_fakes.FakeModel({'instances': [instance]})
             return mock_group

         self.stubs.Set(
@@ -5935,11 +5935,11 @@ class ComputeAPITestCase(BaseTestCase):

     def test_secgroup_refresh_none(self):
         def rule_get(*args, **kwargs):
-            mock_rule = FakeModel({'parent_group_id': 1})
+            mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
             return [mock_rule]

         def group_get(*args, **kwargs):
-            mock_group = FakeModel({'instances': []})
+            mock_group = db_fakes.FakeModel({'instances': []})
             return mock_group

         self.stubs.Set(
@@ -5957,7 +5957,7 @@ class ComputeAPITestCase(BaseTestCase):
         instance = self._create_fake_instance()

         def group_get(*args, **kwargs):
-            mock_group = FakeModel({'instances': [instance]})
+            mock_group = db_fakes.FakeModel({'instances': [instance]})
             return mock_group

         self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
@@ -5978,7 +5978,7 @@ class ComputeAPITestCase(BaseTestCase):
         instance = self._create_fake_instance()

         def group_get(*args, **kwargs):
-            mock_group = FakeModel({'instances': [instance]})
+            mock_group = db_fakes.FakeModel({'instances': [instance]})
             return mock_group

         self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
@@ -5997,7 +5997,7 @@ class ComputeAPITestCase(BaseTestCase):

     def test_secrule_refresh_none(self):
         def group_get(*args, **kwargs):
-            mock_group = FakeModel({'instances': []})
+            mock_group = db_fakes.FakeModel({'instances': []})
             return mock_group

         self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
@@ -22,13 +22,13 @@ import re
 import urllib
 import uuid as uuid_lib

-from coverage import coverage
+import coverage
 from lxml import etree

 from nova.api.metadata import password
 from nova.api.openstack.compute.contrib import coverage_ext
 # Import extensions to pull in osapi_compute_extension CONF option used below.
-from nova.cloudpipe.pipelib import CloudPipe
+from nova.cloudpipe import pipelib
 from nova import context
 from nova import db
 from nova.db.sqlalchemy import models
@@ -761,7 +761,7 @@ class CoverageExtJsonTests(ApiSampleTestBase):

         self.stubs.Set(coverage_ext.CoverageController, '_check_coverage',
                        _fake_check_coverage)
-        self.stubs.Set(coverage, 'xml_report', _fake_xml_report)
+        self.stubs.Set(coverage.coverage, 'xml_report', _fake_xml_report)

     def test_start_coverage(self):
         # Start coverage data collection.
@@ -1512,7 +1512,7 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
             return {'vpn_public_address': '127.0.0.1',
                     'vpn_public_port': 22}

-        self.stubs.Set(CloudPipe, 'get_encoded_zip', get_user_data)
+        self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
         self.stubs.Set(network_manager.NetworkManager, "get_network",
                        network_api_get)

@@ -25,7 +25,7 @@ from nova.openstack.common import jsonutils
 from nova.openstack.common import timeutils
 from nova.scheduler import filters
 from nova.scheduler.filters import extra_specs_ops
-from nova.scheduler.filters.trusted_filter import AttestationService
+from nova.scheduler.filters import trusted_filter
 from nova import servicegroup
 from nova import test
 from nova.tests.scheduler import fakes
@@ -242,7 +242,8 @@ class HostFiltersTestCase(test.TestCase):
         self.oat_data = ''
         self.oat_attested = False
         self.stubs = stubout.StubOutForTesting()
-        self.stubs.Set(AttestationService, '_request', self.fake_oat_request)
+        self.stubs.Set(trusted_filter.AttestationService, '_request',
+                       self.fake_oat_request)
         self.context = context.RequestContext('fake', 'fake')
         self.json_query = jsonutils.dumps(
             ['and', ['>=', '$free_ram_mb', 1024],
@@ -20,7 +20,7 @@ import netaddr
 import sys
 import traceback

-from nova.compute.manager import ComputeManager
+from nova.compute import manager
 from nova import exception
 from nova.openstack.common import importutils
 from nova.openstack.common import log as logging
@@ -159,7 +159,7 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
             # NOTE(sdague) the try block is to make it easier to debug a
             # failure by knowing which driver broke
             try:
-                cm = ComputeManager()
+                cm = manager.ComputeManager()
             except Exception as e:
                 self.fail("Couldn't load driver %s - %s" % (cls, e))

@@ -173,7 +173,7 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
             raise test.TestingException()

         self.stubs.Set(sys, 'exit', _fake_exit)
-        self.assertRaises(test.TestingException, ComputeManager)
+        self.assertRaises(test.TestingException, manager.ComputeManager)


 class _VirtDriverTestCase(_FakeDriverBackendTestCase):
@@ -12,4 +12,6 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-from nova.virt.baremetal.driver import BareMetalDriver
+from nova.virt.baremetal import driver
+
+BareMetalDriver = driver.BareMetalDriver
@@ -23,14 +23,13 @@
 from sqlalchemy.sql.expression import asc
 from sqlalchemy.sql.expression import literal_column

-from nova.db.sqlalchemy.api import is_user_context
-from nova.db.sqlalchemy.api import require_admin_context
+from nova.db.sqlalchemy import api as sqlalchemy_api
 from nova import exception
 from nova.openstack.common import log as logging
 from nova.openstack.common import timeutils
 from nova.openstack.common import uuidutils
 from nova.virt.baremetal.db.sqlalchemy import models
-from nova.virt.baremetal.db.sqlalchemy.session import get_session
+from nova.virt.baremetal.db.sqlalchemy import session as db_session

 LOG = logging.getLogger(__name__)

@@ -44,7 +43,7 @@ def model_query(context, *args, **kwargs):
     :param project_only: if present and context is user-type, then restrict
             query to match the context's project_id.
     """
-    session = kwargs.get('session') or get_session()
+    session = kwargs.get('session') or db_session.get_session()
     read_deleted = kwargs.get('read_deleted') or context.read_deleted
     project_only = kwargs.get('project_only')

@@ -60,7 +59,7 @@ def model_query(context, *args, **kwargs):
         raise Exception(
             _("Unrecognized read_deleted value '%s'") % read_deleted)

-    if project_only and is_user_context(context):
+    if project_only and sqlalchemy_api.is_user_context(context):
         query = query.filter_by(project_id=context.project_id)

     return query
@@ -68,7 +67,7 @@ def model_query(context, *args, **kwargs):

 def _save(ref, session=None):
     if not session:
-        session = get_session()
+        session = db_session.get_session()
     # We must not call ref.save() with session=None, otherwise NovaBase
     # uses nova-db's session, which cannot access bm-db.
     ref.save(session=session)
@@ -81,7 +80,7 @@ def _build_node_order_by(query):
     return query


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_node_get_all(context, service_host=None):
     query = model_query(context, models.BareMetalNode, read_deleted="no")
     if service_host:
@@ -89,7 +88,7 @@ def bm_node_get_all(context, service_host=None):
     return query.all()


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_node_find_free(context, service_host=None,
                       cpus=None, memory_mb=None, local_gb=None):
     query = model_query(context, models.BareMetalNode, read_deleted="no")
@@ -106,7 +105,7 @@ def bm_node_find_free(context, service_host=None,
     return query.first()


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_node_get(context, bm_node_id):
     # bm_node_id may be passed as a string. Convert to INT to improve DB perf.
     bm_node_id = int(bm_node_id)
@@ -120,7 +119,7 @@ def bm_node_get(context, bm_node_id):
     return result


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_node_get_by_instance_uuid(context, instance_uuid):
     if not uuidutils.is_uuid_like(instance_uuid):
         raise exception.InstanceNotFound(instance_id=instance_uuid)
@@ -135,7 +134,7 @@ def bm_node_get_by_instance_uuid(context, instance_uuid):
     return result


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_node_create(context, values):
     bm_node_ref = models.BareMetalNode()
     bm_node_ref.update(values)
@@ -143,14 +142,14 @@ def bm_node_create(context, values):
     return bm_node_ref


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_node_update(context, bm_node_id, values):
     model_query(context, models.BareMetalNode, read_deleted="no").\
             filter_by(id=bm_node_id).\
             update(values)


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_node_set_uuid_safe(context, bm_node_id, values):
     """Associate an instance to a node safely

@@ -164,7 +163,7 @@ def bm_node_set_uuid_safe(context, bm_node_id, values):
         raise exception.NovaException(_(
             "instance_uuid must be supplied to bm_node_set_uuid_safe"))

-    session = get_session()
+    session = db_session.get_session()
     with session.begin():
         query = model_query(context, models.BareMetalNode,
                             session=session, read_deleted="no").\
@@ -181,7 +180,7 @@ def bm_node_set_uuid_safe(context, bm_node_id, values):
     return ref


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_node_destroy(context, bm_node_id):
     model_query(context, models.BareMetalNode).\
             filter_by(id=bm_node_id).\
@@ -190,13 +189,13 @@ def bm_node_destroy(context, bm_node_id):
                     'updated_at': literal_column('updated_at')})


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_pxe_ip_get_all(context):
     query = model_query(context, models.BareMetalPxeIp, read_deleted="no")
     return query.all()


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_pxe_ip_create(context, address, server_address):
     ref = models.BareMetalPxeIp()
     ref.address = address
@@ -205,7 +204,7 @@ def bm_pxe_ip_create(context, address, server_address):
     return ref


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_pxe_ip_create_direct(context, bm_pxe_ip):
     ref = bm_pxe_ip_create(context,
                            address=bm_pxe_ip['address'],
@@ -213,7 +212,7 @@ def bm_pxe_ip_create_direct(context, bm_pxe_ip):
     return ref


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_pxe_ip_destroy(context, ip_id):
     # Delete physically since it has unique columns
     model_query(context, models.BareMetalPxeIp, read_deleted="no").\
@@ -221,7 +220,7 @@ def bm_pxe_ip_destroy(context, ip_id):
             delete()


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_pxe_ip_destroy_by_address(context, address):
     # Delete physically since it has unique columns
     model_query(context, models.BareMetalPxeIp, read_deleted="no").\
@@ -229,7 +228,7 @@ def bm_pxe_ip_destroy_by_address(context, address):
             delete()


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_pxe_ip_get(context, ip_id):
     result = model_query(context, models.BareMetalPxeIp, read_deleted="no").\
             filter_by(id=ip_id).\
@@ -238,7 +237,7 @@ def bm_pxe_ip_get(context, ip_id):
     return result


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_pxe_ip_get_by_bm_node_id(context, bm_node_id):
     result = model_query(context, models.BareMetalPxeIp, read_deleted="no").\
             filter_by(bm_node_id=bm_node_id).\
@@ -250,9 +249,9 @@ def bm_pxe_ip_get_by_bm_node_id(context, bm_node_id):
     return result


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_pxe_ip_associate(context, bm_node_id):
-    session = get_session()
+    session = db_session.get_session()
     with session.begin():
         # Check if the node really exists
         node_ref = model_query(context, models.BareMetalNode,
@@ -288,14 +287,14 @@ def bm_pxe_ip_associate(context, bm_node_id):
     return ip_ref.id


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_pxe_ip_disassociate(context, bm_node_id):
     model_query(context, models.BareMetalPxeIp, read_deleted="no").\
             filter_by(bm_node_id=bm_node_id).\
             update({'bm_node_id': None})


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_interface_get(context, if_id):
     result = model_query(context, models.BareMetalInterface,
                          read_deleted="no").\
@@ -309,14 +308,14 @@ def bm_interface_get(context, if_id):
     return result


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_interface_get_all(context):
     query = model_query(context, models.BareMetalInterface,
                         read_deleted="no")
     return query.all()


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_interface_destroy(context, if_id):
     # Delete physically since it has unique columns
     model_query(context, models.BareMetalInterface, read_deleted="no").\
@@ -324,7 +323,7 @@ def bm_interface_destroy(context, if_id):
             delete()


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
     ref = models.BareMetalInterface()
     ref.bm_node_id = bm_node_id
@@ -335,9 +334,9 @@ def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
     return ref.id


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
-    session = get_session()
+    session = db_session.get_session()
     with session.begin():
         bm_interface = model_query(context, models.BareMetalInterface,
                                    read_deleted="no", session=session).\
@@ -361,7 +360,7 @@ def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
         raise e


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_interface_get_by_vif_uuid(context, vif_uuid):
     result = model_query(context, models.BareMetalInterface,
                          read_deleted="no").\
@@ -375,7 +374,7 @@ def bm_interface_get_by_vif_uuid(context, vif_uuid):
     return result


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
     result = model_query(context, models.BareMetalInterface,
                          read_deleted="no").\
@@ -388,7 +387,7 @@ def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
     return result


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_deployment_create(context, key, image_path, pxe_config_path, root_mb,
                          swap_mb):
     ref = models.BareMetalDeployment()
@@ -401,7 +400,7 @@ def bm_deployment_create(context, key, image_path, pxe_config_path, root_mb,
     return ref.id


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_deployment_get(context, dep_id):
     result = model_query(context, models.BareMetalDeployment,
                          read_deleted="no").\
@@ -410,7 +409,7 @@ def bm_deployment_get(context, dep_id):
     return result


-@require_admin_context
+@sqlalchemy_api.require_admin_context
 def bm_deployment_destroy(context, dep_id):
     model_query(context, models.BareMetalDeployment).\
             filter_by(id=dep_id).\
@@ -25,7 +25,7 @@ import sqlalchemy
 from nova import exception
 from nova.openstack.common import log as logging
 from nova.virt.baremetal.db import migration
-from nova.virt.baremetal.db.sqlalchemy.session import get_engine
+from nova.virt.baremetal.db.sqlalchemy import session

 LOG = logging.getLogger(__name__)

@@ -71,24 +71,25 @@ def db_sync(version=None):
     current_version = db_version()
     repository = _find_migrate_repo()
     if version is None or version > current_version:
-        return versioning_api.upgrade(get_engine(), repository, version)
+        return versioning_api.upgrade(session.get_engine(), repository,
+                                      version)
     else:
-        return versioning_api.downgrade(get_engine(), repository,
+        return versioning_api.downgrade(session.get_engine(), repository,
                                         version)


 def db_version():
     repository = _find_migrate_repo()
     try:
-        return versioning_api.db_version(get_engine(), repository)
+        return versioning_api.db_version(session.get_engine(), repository)
     except versioning_exceptions.DatabaseNotControlledError:
         meta = sqlalchemy.MetaData()
-        engine = get_engine()
+        engine = session.get_engine()
         meta.reflect(bind=engine)
         tables = meta.tables
         if len(tables) == 0:
             db_version_control(migration.INIT_VERSION)
-            return versioning_api.db_version(get_engine(), repository)
+            return versioning_api.db_version(session.get_engine(), repository)
         else:
             # Some pre-Essex DB's may not be version controlled.
             # Require them to upgrade using Essex first.
@@ -98,7 +99,7 @@ def db_version():

 def db_version_control(version=None):
     repository = _find_migrate_repo()
-    versioning_api.version_control(get_engine(), repository, version)
+    versioning_api.version_control(session.get_engine(), repository, version)
     return version

@@ -17,7 +17,7 @@
 # under the License.

 from nova.virt.baremetal import base
-from nova.virt.firewall import NoopFirewallDriver
+from nova.virt import firewall


 class FakeDriver(base.NodeDriver):
@@ -52,7 +52,7 @@ class FakePowerManager(base.PowerManager):
         super(FakePowerManager, self).__init__(**kwargs)


-class FakeFirewallDriver(NoopFirewallDriver):
+class FakeFirewallDriver(firewall.NoopFirewallDriver):

     def __init__(self):
         super(FakeFirewallDriver, self).__init__()
@@ -25,7 +25,7 @@ import os
 import stat
 import tempfile

-from nova.exception import InvalidParameterValue
+from nova import exception
 from nova.openstack.common import cfg
 from nova.openstack.common import log as logging
 from nova import paths
@@ -104,13 +104,17 @@ class IPMI(base.PowerManager):
         self.port = node['terminal_port']

         if self.node_id == None:
-            raise InvalidParameterValue(_("Node id not supplied to IPMI"))
+            raise exception.InvalidParameterValue(_("Node id not supplied "
+                    "to IPMI"))
         if self.address == None:
-            raise InvalidParameterValue(_("Address not supplied to IPMI"))
+            raise exception.InvalidParameterValue(_("Address not supplied "
+                    "to IPMI"))
         if self.user == None:
-            raise InvalidParameterValue(_("User not supplied to IPMI"))
+            raise exception.InvalidParameterValue(_("User not supplied "
+                    "to IPMI"))
         if self.password == None:
-            raise InvalidParameterValue(_("Password not supplied to IPMI"))
+            raise exception.InvalidParameterValue(_("Password not supplied "
+                    "to IPMI"))

     def _exec_ipmitool(self, command):
         args = ['ipmitool',
@@ -67,7 +67,8 @@ CHEETAH = None
 def _get_cheetah():
     global CHEETAH
     if CHEETAH is None:
-        from Cheetah.Template import Template as CHEETAH
+        from Cheetah import Template
+        CHEETAH = Template.Template
     return CHEETAH

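The Cheetah hunk above shows that the rule also reaches deferred imports: the lazy import inside _get_cheetah() switches to module form, and the cached global still ends up holding the Template class. A rough sketch of the same import-on-first-use-and-cache idea, using a standard-library module as a stand-in for the optional dependency:

    _RENDERER = None


    def _get_renderer():
        # Import lazily so the dependency is only loaded when first needed,
        # then cache the class for later calls.
        global _RENDERER
        if _RENDERER is None:
            from xml.dom import minidom  # stand-in for the real dependency
            _RENDERER = minidom.Document
        return _RENDERER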
@@ -14,4 +14,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from nova.virt.libvirt.driver import LibvirtDriver
+from nova.virt.libvirt import driver
+
+LibvirtDriver = driver.LibvirtDriver
@@ -26,4 +26,6 @@ refer to the IBM Redbook[1] publication.
 May 2011. <http://www.redbooks.ibm.com/abstracts/sg247940.html>
 """

-from nova.virt.powervm.driver import PowerVMDriver
+from nova.virt.powervm import driver
+
+PowerVMDriver = driver.PowerVMDriver
@@ -18,4 +18,6 @@
 :mod:`vmwareapi` -- Nova support for VMware ESX/ESXi Server through VMware API.
 """
 # NOTE(sdague) for nicer compute_driver specification
-from nova.virt.vmwareapi.driver import VMwareESXDriver
+from nova.virt.vmwareapi import driver
+
+VMwareESXDriver = driver.VMwareESXDriver
@@ -18,4 +18,6 @@
 :mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI
 ==================================================================
 """
-from nova.virt.xenapi.driver import XenAPIDriver
+from nova.virt.xenapi import driver
+
+XenAPIDriver = driver.XenAPIDriver
@@ -132,7 +132,7 @@ function run_pep8 {
   srcfiles+=" setup.py"

   # Until all these issues get fixed, ignore.
-  ignore='--ignore=E12,E711,E721,E712,N403,N404,N302'
+  ignore='--ignore=E12,E711,E721,E712,N403,N404'

   # First run the hacking selftest, to make sure it's right
   echo "Running hacking.py self test"
tox.ini
@@ -18,11 +18,9 @@ downloadcache = ~/cache/pip
 deps=pep8==1.3.3
 commands =
   python tools/hacking.py --doctest
-  python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404,N302 \
-    --show-source \
+  python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
     --exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
-  python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404,N302 \
-    --show-source \
+  python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
     --filename=nova* bin

 [testenv:pylint]