Blackify everything else

Black was used with the '-l 79 -S' flags.
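
For reference, the invocation would have looked something like this (a
sketch, assuming Black is installed and run from the repository root):

    black -l 79 -S .

Here '-l 79' caps lines at 79 characters to match the pycodestyle default,
and '-S' ('--skip-string-normalization') leaves the existing single-quoted
strings untouched, both of which are visible in the reformatted hunks below.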

A future change will cause git-blame to skip this commit by adding a
'git-blame-ignore-revs' file.
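
Once that file exists, pointing git at it makes 'git blame' ignore this
commit (a sketch using git's standard blame.ignoreRevsFile setting; the
exact filename must match whatever the follow-up change adds):

    git config blame.ignoreRevsFile .git-blame-ignore-revs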

Change-Id: Ie106a5cec8831e113a2b764b62b712a205e3153b
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Stephen Finucane 2023-05-05 11:25:50 +01:00
parent 004c7352d0
commit a36f514295
84 changed files with 4465 additions and 2838 deletions

View File

@ -77,10 +77,13 @@ htmlhelp_basename = 'openstacksdkdoc'
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'doc-openstacksdk.tex',
'OpenStackSDK Documentation',
'OpenStack Foundation', 'manual'),
(
'index',
'doc-openstacksdk.tex',
'OpenStackSDK Documentation',
'OpenStack Foundation',
'manual',
),
]
# Allow deeper levels of nesting for \begin...\end stanzas

View File

@ -16,11 +16,11 @@ from openstack import cloud as openstack
openstack.enable_logging(debug=True)
for cloud_name, region_name in [
('my-vexxhost', 'ca-ymq-1'),
('my-citycloud', 'Buf1'),
('my-internap', 'ams01')]:
('my-vexxhost', 'ca-ymq-1'),
('my-citycloud', 'Buf1'),
('my-internap', 'ams01'),
]:
# Initialize cloud
cloud = openstack.connect(
cloud=cloud_name, region_name=region_name)
cloud = openstack.connect(cloud=cloud_name, region_name=region_name)
for server in cloud.search_servers('my-server'):
cloud.delete_server(server, wait=True, delete_ips=True)

View File

@ -16,20 +16,31 @@ from openstack import cloud as openstack
openstack.enable_logging(debug=True)
for cloud_name, region_name, image, flavor_id in [
('my-vexxhost', 'ca-ymq-1', 'Ubuntu 16.04.1 LTS [2017-03-03]',
'5cf64088-893b-46b5-9bb1-ee020277635d'),
('my-citycloud', 'Buf1', 'Ubuntu 16.04 Xenial Xerus',
'0dab10b5-42a2-438e-be7b-505741a7ffcc'),
('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)',
'A1.4')]:
(
'my-vexxhost',
'ca-ymq-1',
'Ubuntu 16.04.1 LTS [2017-03-03]',
'5cf64088-893b-46b5-9bb1-ee020277635d',
),
(
'my-citycloud',
'Buf1',
'Ubuntu 16.04 Xenial Xerus',
'0dab10b5-42a2-438e-be7b-505741a7ffcc',
),
('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4'),
]:
# Initialize cloud
cloud = openstack.connect(
cloud=cloud_name, region_name=region_name)
cloud = openstack.connect(cloud=cloud_name, region_name=region_name)
# Boot a server, wait for it to boot, and then do whatever is needed
# to get a public ip for it.
server = cloud.create_server(
'my-server', image=image, flavor=dict(id=flavor_id),
wait=True, auto_ip=True)
'my-server',
image=image,
flavor=dict(id=flavor_id),
wait=True,
auto_ip=True,
)
# Delete it - this is a demo
cloud.delete_server(server, wait=True, delete_ips=True)

View File

@ -16,21 +16,24 @@ from openstack import cloud as openstack
openstack.enable_logging(debug=True)
for cloud_name, region_name, image, flavor in [
('my-vexxhost', 'ca-ymq-1',
'Ubuntu 16.04.1 LTS [2017-03-03]', 'v1-standard-4'),
('my-citycloud', 'Buf1',
'Ubuntu 16.04 Xenial Xerus', '4C-4GB-100GB'),
('my-internap', 'ams01',
'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4')]:
(
'my-vexxhost',
'ca-ymq-1',
'Ubuntu 16.04.1 LTS [2017-03-03]',
'v1-standard-4',
),
('my-citycloud', 'Buf1', 'Ubuntu 16.04 Xenial Xerus', '4C-4GB-100GB'),
('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4'),
]:
# Initialize cloud
cloud = openstack.connect(
cloud=cloud_name, region_name=region_name)
cloud = openstack.connect(cloud=cloud_name, region_name=region_name)
cloud.delete_server('my-server', wait=True, delete_ips=True)
# Boot a server, wait for it to boot, and then do whatever is needed
# to get a public ip for it.
server = cloud.create_server(
'my-server', image=image, flavor=flavor, wait=True, auto_ip=True)
'my-server', image=image, flavor=flavor, wait=True, auto_ip=True
)
print(server.name)
print(server['name'])
cloud.pprint(server)

View File

@ -11,8 +11,8 @@
# under the License.
from openstack import cloud as openstack
openstack.enable_logging(debug=True)
cloud = openstack.connect(
cloud='my-vexxhost', region_name='ca-ymq-1')
cloud = openstack.connect(cloud='my-vexxhost', region_name='ca-ymq-1')
cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]')

View File

@ -11,9 +11,10 @@
# under the License.
from openstack import cloud as openstack
openstack.enable_logging()
cloud = openstack.connect(cloud='fuga', region_name='cystack')
cloud.pprint([
image for image in cloud.list_images()
if 'ubuntu' in image.name.lower()])
cloud.pprint(
[image for image in cloud.list_images() if 'ubuntu' in image.name.lower()]
)

View File

@ -11,8 +11,8 @@
# under the License.
from openstack import cloud as openstack
openstack.enable_logging(http_debug=True)
cloud = openstack.connect(
cloud='my-vexxhost', region_name='ca-ymq-1')
cloud = openstack.connect(cloud='my-vexxhost', region_name='ca-ymq-1')
cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]')

View File

@ -11,6 +11,7 @@
# under the License.
from openstack import cloud as openstack
openstack.enable_logging(debug=True)
cloud = openstack.connect(cloud='ovh', region_name='SBG1')

View File

@ -11,9 +11,11 @@
# under the License.
from openstack import cloud as openstack
openstack.enable_logging()
cloud = openstack.connect(cloud='fuga', region_name='cystack')
image = cloud.get_image(
'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image')
'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image'
)
cloud.pprint(image)

View File

@ -11,14 +11,18 @@
# under the License.
import openstack
openstack.enable_logging(debug=True)
cloud = openstack.connect(cloud='my-citycloud', region_name='Buf1')
try:
server = cloud.create_server(
'my-server', image='Ubuntu 16.04 Xenial Xerus',
'my-server',
image='Ubuntu 16.04 Xenial Xerus',
flavor=dict(id='0dab10b5-42a2-438e-be7b-505741a7ffcc'),
wait=True, auto_ip=True)
wait=True,
auto_ip=True,
)
print("\n\nFull Server\n\n")
cloud.pprint(server)

View File

@ -11,6 +11,7 @@
# under the License.
import openstack
openstack.enable_logging(debug=True)
cloud = openstack.connect(cloud='rax', region_name='DFW')

View File

@ -11,6 +11,7 @@
# under the License.
import openstack
openstack.enable_logging(debug=True)
cloud = openstack.connect(cloud='kiss', region_name='region1')

View File

@ -11,10 +11,11 @@
# under the License.
import openstack
openstack.enable_logging()
cloud = openstack.connect(
cloud='fuga', region_name='cystack', strict=True)
cloud = openstack.connect(cloud='fuga', region_name='cystack', strict=True)
image = cloud.get_image(
'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image')
'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image'
)
cloud.pprint(image)

View File

@ -11,12 +11,15 @@
# under the License.
import openstack
openstack.enable_logging(debug=True)
cloud = openstack.connect(cloud='ovh', region_name='SBG1')
cloud.create_object(
container='my-container', name='my-object',
container='my-container',
name='my-object',
filename='/home/mordred/briarcliff.sh3d',
segment_size=1000000)
segment_size=1000000,
)
cloud.delete_object('my-container', 'my-object')
cloud.delete_container('my-container')

View File

@ -11,12 +11,15 @@
# under the License.
import openstack
openstack.enable_logging(debug=True)
cloud = openstack.connect(cloud='ovh', region_name='SBG1')
cloud.create_object(
container='my-container', name='my-object',
container='my-container',
name='my-object',
filename='/home/mordred/briarcliff.sh3d',
segment_size=1000000)
segment_size=1000000,
)
cloud.delete_object('my-container', 'my-object')
cloud.delete_container('my-container')

View File

@ -11,8 +11,10 @@
# under the License.
import openstack
openstack.enable_logging(http_debug=True)
cloud = openstack.connect(
cloud='datacentred', app_name='AmazingApp', app_version='1.0')
cloud='datacentred', app_name='AmazingApp', app_version='1.0'
)
cloud.list_networks()

View File

@ -107,9 +107,7 @@ def replace_nodes_in_cluster(conn):
old_node = NODE_ID
new_node = "cd803d4a-015d-4223-b15f-db29bad3146c"
spec = {
old_node: new_node
}
spec = {old_node: new_node}
res = conn.clustering.replace_nodes_in_cluster(CLUSTER_ID, **spec)
print(res)
@ -135,7 +133,7 @@ def resize_cluster(conn):
'min_size': 1,
'max_size': 6,
'adjustment_type': 'EXACT_CAPACITY',
'number': 2
'number': 2,
}
res = conn.clustering.resize_cluster(CLUSTER_ID, **spec)
print(res)
@ -146,7 +144,8 @@ def attach_policy_to_cluster(conn):
spec = {'enabled': True}
res = conn.clustering.attach_policy_to_cluster(
CLUSTER_ID, POLICY_ID, **spec)
CLUSTER_ID, POLICY_ID, **spec
)
print(res)

View File

@ -38,8 +38,8 @@ def create_policy(conn):
'properties': {
'criteria': 'oldest_first',
'destroy_after_deletion': True,
}
}
},
},
}
policy = conn.clustering.create_policy(attrs)

View File

@ -44,10 +44,8 @@ def create_profile(conn):
'name': SERVER_NAME,
'flavor': FLAVOR_NAME,
'image': IMAGE_NAME,
'networks': {
'network': NETWORK_NAME
}
}
'networks': {'network': NETWORK_NAME},
},
}
profile = conn.clustering.create_profile(spec)

View File

@ -39,10 +39,8 @@ def create_receiver(conn):
"action": "CLUSTER_SCALE_OUT",
"cluster_id": CLUSTER_ID,
"name": FAKE_NAME,
"params": {
"count": "1"
},
"type": "webhook"
"params": {"count": "1"},
"type": "webhook",
}
receiver = conn.clustering.create_receiver(**spec)
@ -66,12 +64,7 @@ def find_receiver(conn):
def update_receiver(conn):
print("Update Receiver:")
spec = {
"name": "test_receiver2",
"params": {
"count": "2"
}
}
spec = {"name": "test_receiver2", "params": {"count": "2"}}
receiver = conn.clustering.update_receiver(FAKE_NAME, **spec)
print(receiver.to_dict())

View File

@ -62,11 +62,17 @@ def create_server(conn):
keypair = create_keypair(conn)
server = conn.compute.create_server(
name=SERVER_NAME, image_id=image.id, flavor_id=flavor.id,
networks=[{"uuid": network.id}], key_name=keypair.name)
name=SERVER_NAME,
image_id=image.id,
flavor_id=flavor.id,
networks=[{"uuid": network.id}],
key_name=keypair.name,
)
server = conn.compute.wait_for_server(server)
print("ssh -i {key} root@{ip}".format(
key=PRIVATE_KEYPAIR_FILE,
ip=server.access_ipv4))
print(
"ssh -i {key} root@{ip}".format(
key=PRIVATE_KEYPAIR_FILE, ip=server.access_ipv4
)
)

View File

@ -45,8 +45,9 @@ class Opts:
def _get_resource_value(resource_key, default):
return config.get_extra_config(
EXAMPLE_CONFIG_KEY).get(resource_key, default)
return config.get_extra_config(EXAMPLE_CONFIG_KEY).get(
resource_key, default
)
SERVER_NAME = 'openstacksdk-example'
@ -55,10 +56,12 @@ FLAVOR_NAME = _get_resource_value('flavor_name', 'm1.small')
NETWORK_NAME = _get_resource_value('network_name', 'private')
KEYPAIR_NAME = _get_resource_value('keypair_name', 'openstacksdk-example')
SSH_DIR = _get_resource_value(
'ssh_dir', '{home}/.ssh'.format(home=os.path.expanduser("~")))
'ssh_dir', '{home}/.ssh'.format(home=os.path.expanduser("~"))
)
PRIVATE_KEYPAIR_FILE = _get_resource_value(
'private_keypair_file', '{ssh_dir}/id_rsa.{key}'.format(
ssh_dir=SSH_DIR, key=KEYPAIR_NAME))
'private_keypair_file',
'{ssh_dir}/id_rsa.{key}'.format(ssh_dir=SSH_DIR, key=KEYPAIR_NAME),
)
EXAMPLE_IMAGE_NAME = 'openstacksdk-example-public-image'
@ -72,8 +75,15 @@ def create_connection_from_args():
return openstack.connect(options=parser)
def create_connection(auth_url, region, project_name, username, password,
user_domain, project_domain):
def create_connection(
auth_url,
region,
project_name,
username,
password,
user_domain,
project_domain,
):
return openstack.connect(
auth_url=auth_url,
project_name=project_name,

View File

@ -24,8 +24,10 @@ def import_image(conn):
print("Import Image:")
# Url where glance can download the image
uri = 'https://download.cirros-cloud.net/0.4.0/' \
'cirros-0.4.0-x86_64-disk.img'
uri = (
'https://download.cirros-cloud.net/0.4.0/'
'cirros-0.4.0-x86_64-disk.img'
)
# Build the image attributes and import the image.
image_attrs = {

View File

@ -18,8 +18,10 @@ List resources from the Key Manager service.
def create_secret(conn):
print("Create a secret:")
conn.key_manager.create_secret(name="My public key",
secret_type="public",
expiration="2020-02-28T23:59:59",
payload="ssh rsa...",
payload_content_type="text/plain")
conn.key_manager.create_secret(
name="My public key",
secret_type="public",
expiration="2020-02-28T23:59:59",
payload="ssh rsa...",
payload_content_type="text/plain",
)

View File

@ -26,6 +26,6 @@ def list_secrets_query(conn):
print("List Secrets:")
for secret in conn.key_manager.secrets(
secret_type="symmetric",
expiration="gte:2020-01-01T00:00:00"):
secret_type="symmetric", expiration="gte:2020-01-01T00:00:00"
):
print(secret)

View File

@ -22,7 +22,8 @@ def create_network(conn):
print("Create Network:")
example_network = conn.network.create_network(
name='openstacksdk-example-project-network')
name='openstacksdk-example-project-network'
)
print(example_network)
@ -31,6 +32,7 @@ def create_network(conn):
network_id=example_network.id,
ip_version='4',
cidr='10.0.2.0/24',
gateway_ip='10.0.2.1')
gateway_ip='10.0.2.1',
)
print(example_subnet)

View File

@ -22,7 +22,8 @@ def delete_network(conn):
print("Delete Network:")
example_network = conn.network.find_network(
'openstacksdk-example-project-network')
'openstacksdk-example-project-network'
)
for example_subnet in example_network.subnet_ids:
conn.network.delete_subnet(example_subnet, ignore_missing=False)

View File

@ -22,7 +22,8 @@ def open_port(conn):
print("Open a port:")
example_sec_group = conn.network.create_security_group(
name='openstacksdk-example-security-group')
name='openstacksdk-example-security-group'
)
print(example_sec_group)
@ -33,7 +34,8 @@ def open_port(conn):
protocol='HTTPS',
port_range_max='443',
port_range_min='443',
ethertype='IPv4')
ethertype='IPv4',
)
print(example_rule)
@ -42,7 +44,8 @@ def allow_ping(conn):
print("Allow pings:")
example_sec_group = conn.network.create_security_group(
name='openstacksdk-example-security-group2')
name='openstacksdk-example-security-group2'
)
print(example_sec_group)
@ -53,6 +56,7 @@ def allow_ping(conn):
protocol='icmp',
port_range_max=None,
port_range_min=None,
ethertype='IPv4')
ethertype='IPv4',
)
print(example_rule)

View File

@ -31,8 +31,10 @@ def get_share_instance(conn, share_instance_id):
def reset_share_instance_status(conn, share_instance_id, status):
print('Reset the status of the share instance with the given '
'share_instance_id to the given status')
print(
'Reset the status of the share instance with the given '
'share_instance_id to the given status'
)
conn.share.reset_share_instance_status(share_instance_id, status)

View File

@ -19,16 +19,18 @@ import pbr.version
def show_version(args):
print("OpenstackSDK Version %s" %
pbr.version.VersionInfo('openstacksdk').version_string_with_vcs())
print(
"OpenstackSDK Version %s"
% pbr.version.VersionInfo('openstacksdk').version_string_with_vcs()
)
parser = argparse.ArgumentParser(description="Openstack SDK")
subparsers = parser.add_subparsers(title='commands',
dest='command')
subparsers = parser.add_subparsers(title='commands', dest='command')
cmd_version = subparsers.add_parser('version',
help='show Openstack SDK version')
cmd_version = subparsers.add_parser(
'version', help='show Openstack SDK version'
)
cmd_version.set_defaults(func=show_version)
args = parser.parse_args()

View File

@ -44,7 +44,10 @@ def setup_logging(name, handlers=None, level=None):
def enable_logging(
debug=False, http_debug=False, path=None, stream=None,
debug=False,
http_debug=False,
path=None,
stream=None,
format_stream=False,
format_template='%(asctime)s %(levelname)s: %(name)s %(message)s',
handlers=None,
@ -121,9 +124,11 @@ def enable_logging(
# enable_logging should not be used and instead python logging should
# be configured directly.
setup_logging(
'urllib3', handlers=[logging.NullHandler()], level=logging.INFO)
'urllib3', handlers=[logging.NullHandler()], level=logging.INFO
)
setup_logging(
'stevedore', handlers=[logging.NullHandler()], level=logging.INFO)
'stevedore', handlers=[logging.NullHandler()], level=logging.INFO
)
# Suppress warning about keystoneauth loggers
setup_logging('keystoneauth.discovery')
setup_logging('keystoneauth.identity.base')

View File

@ -6,7 +6,9 @@ from openstack.baremetal_introspection import baremetal_introspection_service
from openstack.block_storage import block_storage_service
from openstack.clustering import clustering_service
from openstack.compute import compute_service
from openstack.container_infrastructure_management import container_infrastructure_management_service
from openstack.container_infrastructure_management import (
container_infrastructure_management_service,
)
from openstack.database import database_service
from openstack.dns import dns_service
from openstack.identity import identity_service
@ -31,32 +33,52 @@ class ServicesMixin:
image = image_service.ImageService(service_type='image')
load_balancer = load_balancer_service.LoadBalancerService(service_type='load-balancer')
load_balancer = load_balancer_service.LoadBalancerService(
service_type='load-balancer'
)
object_store = object_store_service.ObjectStoreService(service_type='object-store')
object_store = object_store_service.ObjectStoreService(
service_type='object-store'
)
clustering = clustering_service.ClusteringService(service_type='clustering')
clustering = clustering_service.ClusteringService(
service_type='clustering'
)
resource_cluster = clustering
cluster = clustering
data_processing = service_description.ServiceDescription(service_type='data-processing')
data_processing = service_description.ServiceDescription(
service_type='data-processing'
)
baremetal = baremetal_service.BaremetalService(service_type='baremetal')
bare_metal = baremetal
baremetal_introspection = baremetal_introspection_service.BaremetalIntrospectionService(service_type='baremetal-introspection')
baremetal_introspection = (
baremetal_introspection_service.BaremetalIntrospectionService(
service_type='baremetal-introspection'
)
)
key_manager = key_manager_service.KeyManagerService(service_type='key-manager')
key_manager = key_manager_service.KeyManagerService(
service_type='key-manager'
)
resource_optimization = service_description.ServiceDescription(service_type='resource-optimization')
resource_optimization = service_description.ServiceDescription(
service_type='resource-optimization'
)
infra_optim = resource_optimization
message = message_service.MessageService(service_type='message')
messaging = message
application_catalog = service_description.ServiceDescription(service_type='application-catalog')
application_catalog = service_description.ServiceDescription(
service_type='application-catalog'
)
container_infrastructure_management = container_infrastructure_management_service.ContainerInfrastructureManagementService(service_type='container-infrastructure-management')
container_infrastructure_management = container_infrastructure_management_service.ContainerInfrastructureManagementService(
service_type='container-infrastructure-management'
)
container_infra = container_infrastructure_management
container_infrastructure = container_infrastructure_management
@ -68,17 +90,27 @@ class ServicesMixin:
rating = service_description.ServiceDescription(service_type='rating')
operator_policy = service_description.ServiceDescription(service_type='operator-policy')
operator_policy = service_description.ServiceDescription(
service_type='operator-policy'
)
policy = operator_policy
shared_file_system = shared_file_system_service.SharedFilesystemService(service_type='shared-file-system')
shared_file_system = shared_file_system_service.SharedFilesystemService(
service_type='shared-file-system'
)
share = shared_file_system
data_protection_orchestration = service_description.ServiceDescription(service_type='data-protection-orchestration')
data_protection_orchestration = service_description.ServiceDescription(
service_type='data-protection-orchestration'
)
orchestration = orchestration_service.OrchestrationService(service_type='orchestration')
orchestration = orchestration_service.OrchestrationService(
service_type='orchestration'
)
block_storage = block_storage_service.BlockStorageService(service_type='block-storage')
block_storage = block_storage_service.BlockStorageService(
service_type='block-storage'
)
block_store = block_storage
volume = block_storage
@ -92,44 +124,69 @@ class ServicesMixin:
event = service_description.ServiceDescription(service_type='event')
events = event
application_deployment = service_description.ServiceDescription(service_type='application-deployment')
application_deployment = service_description.ServiceDescription(
service_type='application-deployment'
)
application_deployment = application_deployment
multi_region_network_automation = service_description.ServiceDescription(service_type='multi-region-network-automation')
multi_region_network_automation = service_description.ServiceDescription(
service_type='multi-region-network-automation'
)
tricircle = multi_region_network_automation
database = database_service.DatabaseService(service_type='database')
application_container = service_description.ServiceDescription(service_type='application-container')
application_container = service_description.ServiceDescription(
service_type='application-container'
)
container = application_container
root_cause_analysis = service_description.ServiceDescription(service_type='root-cause-analysis')
root_cause_analysis = service_description.ServiceDescription(
service_type='root-cause-analysis'
)
rca = root_cause_analysis
nfv_orchestration = service_description.ServiceDescription(service_type='nfv-orchestration')
nfv_orchestration = service_description.ServiceDescription(
service_type='nfv-orchestration'
)
network = network_service.NetworkService(service_type='network')
backup = service_description.ServiceDescription(service_type='backup')
monitoring_logging = service_description.ServiceDescription(service_type='monitoring-logging')
monitoring_logging = service_description.ServiceDescription(
service_type='monitoring-logging'
)
monitoring_log_api = monitoring_logging
monitoring = service_description.ServiceDescription(service_type='monitoring')
monitoring = service_description.ServiceDescription(
service_type='monitoring'
)
monitoring_events = service_description.ServiceDescription(service_type='monitoring-events')
monitoring_events = service_description.ServiceDescription(
service_type='monitoring-events'
)
placement = placement_service.PlacementService(service_type='placement')
instance_ha = instance_ha_service.InstanceHaService(service_type='instance-ha')
instance_ha = instance_ha_service.InstanceHaService(
service_type='instance-ha'
)
ha = instance_ha
reservation = service_description.ServiceDescription(service_type='reservation')
reservation = service_description.ServiceDescription(
service_type='reservation'
)
function_engine = service_description.ServiceDescription(service_type='function-engine')
function_engine = service_description.ServiceDescription(
service_type='function-engine'
)
accelerator = accelerator_service.AcceleratorService(service_type='accelerator')
accelerator = accelerator_service.AcceleratorService(
service_type='accelerator'
)
admin_logic = service_description.ServiceDescription(service_type='admin-logic')
admin_logic = service_description.ServiceDescription(
service_type='admin-logic'
)
registration = admin_logic

View File

@ -82,14 +82,13 @@ class MetadataMixin:
url = utils.urljoin(self.base_path, self.id, 'metadata', key)
response = session.get(url)
exceptions.raise_from_response(
response, error_message='Metadata item does not exist')
response, error_message='Metadata item does not exist'
)
meta = response.json().get('meta', {})
# Here we need to potentially init metadata
metadata = self.metadata or {}
metadata[key] = meta.get(key)
self._body.attributes.update({
'metadata': metadata
})
self._body.attributes.update({'metadata': metadata})
return self
@ -101,17 +100,12 @@ class MetadataMixin:
:param str value: The value.
"""
url = utils.urljoin(self.base_path, self.id, 'metadata', key)
response = session.put(
url,
json={'meta': {key: value}}
)
response = session.put(url, json={'meta': {key: value}})
exceptions.raise_from_response(response)
# we do not want to update tags directly
metadata = self.metadata
metadata[key] = value
self._body.attributes.update({
'metadata': metadata
})
self._body.attributes.update({'metadata': metadata})
return self
def delete_metadata_item(self, session, key):
@ -132,7 +126,5 @@ class MetadataMixin:
metadata = {}
except ValueError:
pass # do nothing!
self._body.attributes.update({
'metadata': metadata
})
self._body.attributes.update({'metadata': metadata})
return self

View File

@ -26,8 +26,7 @@ class QuotaSet(resource.Resource):
allow_delete = True
allow_commit = True
_query_mapping = resource.QueryParameters(
"usage")
_query_mapping = resource.QueryParameters("usage")
# NOTE(gtema) Sadly this attribute is useless in all the methods, but keep
# it here extra as a reminder
@ -47,8 +46,14 @@ class QuotaSet(resource.Resource):
project_id = resource.URI('project_id')
def fetch(self, session, requires_id=False,
base_path=None, error_message=None, **params):
def fetch(
self,
session,
requires_id=False,
base_path=None,
error_message=None,
**params
):
return super(QuotaSet, self).fetch(
session,
requires_id=False,
@ -93,8 +98,9 @@ class QuotaSet(resource.Resource):
if 'in_use' in val:
normalized_attrs['usage'][key] = val['in_use']
if 'reserved' in val:
normalized_attrs['reservation'][key] = \
val['reserved']
normalized_attrs['reservation'][key] = val[
'reserved'
]
if 'limit' in val:
normalized_attrs[key] = val['limit']
else:

View File

@ -81,8 +81,9 @@ class TagMixin:
url = utils.urljoin(self.base_path, self.id, 'tags', tag)
session = self._get_session(session)
response = session.get(url)
exceptions.raise_from_response(response,
error_message='Tag does not exist')
exceptions.raise_from_response(
response, error_message='Tag does not exist'
)
return self
def add_tag(self, session, tag):
@ -98,9 +99,7 @@ class TagMixin:
# we do not want to update tags directly
tags = self.tags
tags.append(tag)
self._body.attributes.update({
'tags': tags
})
self._body.attributes.update({'tags': tags})
return self
def remove_tag(self, session, tag):
@ -121,7 +120,5 @@ class TagMixin:
tags.remove(tag)
except ValueError:
pass # do nothing!
self._body.attributes.update({
'tags': tags
})
self._body.attributes.update({'tags': tags})
return self

View File

@ -18,15 +18,20 @@ from openstack.config.loader import OpenStackConfig # noqa
def get_cloud_region(
service_key=None, options=None,
app_name=None, app_version=None,
load_yaml_config=True,
load_envvars=True,
**kwargs):
service_key=None,
options=None,
app_name=None,
app_version=None,
load_yaml_config=True,
load_envvars=True,
**kwargs
):
config = OpenStackConfig(
load_yaml_config=load_yaml_config,
load_envvars=load_envvars,
app_name=app_name, app_version=app_version)
app_name=app_name,
app_version=app_version,
)
if options:
config.register_argparse_arguments(options, sys.argv, service_key)
parsed_options = options.parse_known_args(sys.argv)

View File

@ -22,7 +22,9 @@ def normalize_keys(config):
elif isinstance(value, bool):
new_config[key] = value
elif isinstance(value, int) and key not in (
'verbose_level', 'api_timeout'):
'verbose_level',
'api_timeout',
):
new_config[key] = str(value)
elif isinstance(value, float):
new_config[key] = str(value)

View File

@ -18,7 +18,6 @@ from openstack.config import cloud_region
class CloudConfig(cloud_region.CloudRegion):
def __init__(self, name, region, config, **kwargs):
super(CloudConfig, self).__init__(name, region, config, **kwargs)
self.region = region

View File

@ -28,6 +28,7 @@ from keystoneauth1.loading import adapter as ks_load_adap
from keystoneauth1 import session as ks_session
import os_service_types
import requestsexceptions
try:
import statsd
except ImportError:
@ -52,9 +53,11 @@ from openstack import version as openstack_version
_logger = _log.setup_logging('openstack')
SCOPE_KEYS = {
'domain_id', 'domain_name',
'project_id', 'project_name',
'system_scope'
'domain_id',
'domain_name',
'project_id',
'project_name',
'system_scope',
}
# Sentinel for nonexistence
@ -90,9 +93,15 @@ def _get_implied_microversion(version):
return version
def from_session(session, name=None, region_name=None,
force_ipv4=False,
app_name=None, app_version=None, **kwargs):
def from_session(
session,
name=None,
region_name=None,
force_ipv4=False,
app_name=None,
app_version=None,
**kwargs
):
"""Construct a CloudRegion from an existing `keystoneauth1.session.Session`
When a Session already exists, we don't actually even need to go through
@ -118,9 +127,14 @@ def from_session(session, name=None, region_name=None,
config_dict = config_defaults.get_defaults()
config_dict.update(**kwargs)
return CloudRegion(
name=name, session=session, config=config_dict,
region_name=region_name, force_ipv4=force_ipv4,
app_name=app_name, app_version=app_version)
name=name,
session=session,
config=config_dict,
region_name=region_name,
force_ipv4=force_ipv4,
app_name=app_name,
app_version=app_version,
)
def from_conf(conf, session=None, service_types=None, **kwargs):
@ -160,8 +174,10 @@ def from_conf(conf, session=None, service_types=None, **kwargs):
for st in stm.all_types_by_service_type:
if service_types is not None and st not in service_types:
_disable_service(
config_dict, st,
reason="Not in the list of requested service_types.")
config_dict,
st,
reason="Not in the list of requested service_types.",
)
continue
project_name = stm.get_project_name(st)
if project_name not in conf:
@ -170,10 +186,13 @@ def from_conf(conf, session=None, service_types=None, **kwargs):
if project_name not in conf:
_disable_service(
config_dict, st,
config_dict,
st,
reason="No section for project '{project}' (service type "
"'{service_type}') was present in the config."
.format(project=project_name, service_type=st))
"'{service_type}') was present in the config.".format(
project=project_name, service_type=st
),
)
continue
opt_dict = {}
# Populate opt_dict with (appropriately processed) Adapter conf opts
@ -189,20 +208,24 @@ def from_conf(conf, session=None, service_types=None, **kwargs):
# option of) blowing up right away for (2) rather than letting them
# get all the way to the point of trying the service and having
# *that* blow up.
reason = ("Encountered an exception attempting to process config "
"for project '{project}' (service type "
"'{service_type}'): {exception}".format(
project=project_name, service_type=st, exception=e))
_logger.warning("Disabling service '{service_type}': "
"{reason}".format(service_type=st, reason=reason))
reason = (
"Encountered an exception attempting to process config "
"for project '{project}' (service type "
"'{service_type}'): {exception}".format(
project=project_name, service_type=st, exception=e
)
)
_logger.warning(
"Disabling service '{service_type}': "
"{reason}".format(service_type=st, reason=reason)
)
_disable_service(config_dict, st, reason=reason)
continue
# Load them into config_dict under keys prefixed by ${service_type}_
for raw_name, opt_val in opt_dict.items():
config_name = _make_key(raw_name, st)
config_dict[config_name] = opt_val
return CloudRegion(
session=session, config=config_dict, **kwargs)
return CloudRegion(session=session, config=config_dict, **kwargs)
class CloudRegion:
@ -232,18 +255,34 @@ class CloudRegion:
'interface': 'public'
"""
def __init__(self, name=None, region_name=None, config=None,
force_ipv4=False, auth_plugin=None,
openstack_config=None, session_constructor=None,
app_name=None, app_version=None, session=None,
discovery_cache=None, extra_config=None,
cache_expiration_time=0, cache_expirations=None,
cache_path=None, cache_class='dogpile.cache.null',
cache_arguments=None, password_callback=None,
statsd_host=None, statsd_port=None, statsd_prefix=None,
influxdb_config=None,
collector_registry=None,
cache_auth=False):
def __init__(
self,
name=None,
region_name=None,
config=None,
force_ipv4=False,
auth_plugin=None,
openstack_config=None,
session_constructor=None,
app_name=None,
app_version=None,
session=None,
discovery_cache=None,
extra_config=None,
cache_expiration_time=0,
cache_expirations=None,
cache_path=None,
cache_class='dogpile.cache.null',
cache_arguments=None,
password_callback=None,
statsd_host=None,
statsd_port=None,
statsd_prefix=None,
influxdb_config=None,
collector_registry=None,
cache_auth=False,
):
self._name = name
self.config = _util.normalize_keys(config)
# NOTE(efried): For backward compatibility: a) continue to accept the
@ -294,9 +333,7 @@ class CloudRegion:
return self.config.__iter__()
def __eq__(self, other):
return (
self.name == other.name
and self.config == other.config)
return self.name == other.name and self.config == other.config
def __ne__(self, other):
return not self == other
@ -306,7 +343,8 @@ class CloudRegion:
if self._name is None:
try:
self._name = urllib.parse.urlparse(
self.get_session().auth.auth_url).hostname
self.get_session().auth.auth_url
).hostname
except Exception:
self._name = self._app_name or ''
return self._name
@ -352,7 +390,9 @@ class CloudRegion:
"You are specifying a cacert for the cloud {full_name}"
" but also to ignore the host verification. The host SSL"
" cert will not be verified.".format(
full_name=self.full_name))
full_name=self.full_name
)
)
cert = self.config.get('cert')
if cert:
@ -365,19 +405,23 @@ class CloudRegion:
"""Return a list of service types we know something about."""
services = []
for key, val in self.config.items():
if (key.endswith('api_version')
or key.endswith('service_type')
or key.endswith('service_name')):
if (
key.endswith('api_version')
or key.endswith('service_type')
or key.endswith('service_name')
):
services.append("_".join(key.split('_')[:-2]))
return list(set(services))
def get_enabled_services(self):
services = set()
all_services = [k['service_type'] for k in
self._service_type_manager.services]
all_services.extend(k[4:] for k in
self.config.keys() if k.startswith('has_'))
all_services = [
k['service_type'] for k in self._service_type_manager.services
]
all_services.extend(
k[4:] for k in self.config.keys() if k.startswith('has_')
)
for srv in all_services:
ep = self.get_endpoint_from_catalog(srv)
@ -390,10 +434,13 @@ class CloudRegion:
return self.config.get('auth', {})
def _get_config(
self, key, service_type,
default=None,
fallback_to_unprefixed=False,
converter=None):
self,
key,
service_type,
default=None,
fallback_to_unprefixed=False,
converter=None,
):
'''Get a config value for a service_type.
Finds the config value for a key, looking first for it prefixed by
@ -442,11 +489,13 @@ class CloudRegion:
# If a region_name for the specific service_type is configured, use it;
# else use the one configured for the CloudRegion as a whole.
return self._get_config(
'region_name', service_type, fallback_to_unprefixed=True)
'region_name', service_type, fallback_to_unprefixed=True
)
def get_interface(self, service_type=None):
return self._get_config(
'interface', service_type, fallback_to_unprefixed=True)
'interface', service_type, fallback_to_unprefixed=True
)
def get_api_version(self, service_type):
version = self._get_config('api_version', service_type)
@ -458,7 +507,8 @@ class CloudRegion:
warnings.warn(
"You have a configured API_VERSION with 'latest' in"
" it. In the context of openstacksdk this doesn't make"
" any sense.")
" any sense."
)
return None
return version
@ -475,9 +525,11 @@ class CloudRegion:
# type will get us things in the right order.
if self._service_type_manager.is_known(service_type):
service_type = self._service_type_manager.get_service_type(
service_type)
service_type
)
return self._get_config(
'service_type', service_type, default=service_type)
'service_type', service_type, default=service_type
)
def get_service_name(self, service_type):
return self._get_config('service_name', service_type)
@ -492,8 +544,11 @@ class CloudRegion:
# then the endpoint value is the endpoint_override for every
# service.
value = auth.get('endpoint')
if (not value and service_type == 'identity'
and SCOPE_KEYS.isdisjoint(set(auth.keys()))):
if (
not value
and service_type == 'identity'
and SCOPE_KEYS.isdisjoint(set(auth.keys()))
):
# There are a small number of unscoped identity operations.
# Specifically, looking up a list of projects/domains/system to
# scope to.
@ -503,7 +558,8 @@ class CloudRegion:
# only v1 is in the catalog but the service actually does support
# v2. But the endpoint needs the project_id.
service_type = self._service_type_manager.get_service_type(
service_type)
service_type
)
if (
value
and self.config.get('profile') == 'rackspace'
@ -513,7 +569,8 @@ class CloudRegion:
return value
def get_endpoint_from_catalog(
self, service_type, interface=None, region_name=None):
self, service_type, interface=None, region_name=None
):
"""Return the endpoint for a given service as found in the catalog.
For values respecting endpoint overrides, see
@ -537,19 +594,26 @@ class CloudRegion:
return catalog.url_for(
service_type=service_type,
interface=interface,
region_name=region_name)
region_name=region_name,
)
except keystoneauth1.exceptions.catalog.EndpointNotFound:
return None
def get_connect_retries(self, service_type):
return self._get_config('connect_retries', service_type,
fallback_to_unprefixed=True,
converter=int)
return self._get_config(
'connect_retries',
service_type,
fallback_to_unprefixed=True,
converter=int,
)
def get_status_code_retries(self, service_type):
return self._get_config('status_code_retries', service_type,
fallback_to_unprefixed=True,
converter=int)
return self._get_config(
'status_code_retries',
service_type,
fallback_to_unprefixed=True,
converter=int,
)
@property
def prefer_ipv6(self):
@ -612,14 +676,16 @@ class CloudRegion:
desirable.
"""
self._keystone_session.additional_user_agent.append(
('openstacksdk', openstack_version.__version__))
('openstacksdk', openstack_version.__version__)
)
def get_session(self):
"""Return a keystoneauth session based on the auth credentials."""
if self._keystone_session is None:
if not self._auth:
raise exceptions.ConfigException(
"Problem with auth parameters")
"Problem with auth parameters"
)
(verify, cert) = self.get_requests_verify_args()
# Turn off urllib3 warnings about insecure certs if we have
# explicitly configured requests to tell it we do not want
@ -627,7 +693,8 @@ class CloudRegion:
if not verify:
self.log.debug(
"Turning off SSL warnings for {full_name}"
" since verify=False".format(full_name=self.full_name))
" since verify=False".format(full_name=self.full_name)
)
requestsexceptions.squelch_warnings(insecure_requests=not verify)
self._keystone_session = self._session_constructor(
auth=self._auth,
@ -635,7 +702,8 @@ class CloudRegion:
cert=cert,
timeout=self.config.get('api_timeout'),
collect_timing=self.config.get('timing'),
discovery_cache=self._discovery_cache)
discovery_cache=self._discovery_cache,
)
self.insert_user_agent()
# Using old keystoneauth with new os-client-config fails if
# we pass in app_name and app_version. Those are not essential,
@ -683,15 +751,20 @@ class CloudRegion:
default_microversion = self.get_default_microversion(service_type)
implied_microversion = _get_implied_microversion(version)
if (implied_microversion and default_microversion
and implied_microversion != default_microversion):
if (
implied_microversion
and default_microversion
and implied_microversion != default_microversion
):
raise exceptions.ConfigException(
"default_microversion of {default_microversion} was given"
" for {service_type}, but api_version looks like a"
" microversion as well. Please set api_version to just the"
" desired major version, or omit default_microversion".format(
default_microversion=default_microversion,
service_type=service_type))
service_type=service_type,
)
)
if implied_microversion:
default_microversion = implied_microversion
# If we're inferring a microversion, don't pass the whole
@ -715,7 +788,8 @@ class CloudRegion:
)
region_versions = versions.get(region_name, {})
interface_versions = region_versions.get(
self.get_interface(service_type), {})
self.get_interface(service_type), {}
)
return interface_versions.get(service_type, [])
def _get_endpoint_from_catalog(self, service_type, constructor):
@ -729,8 +803,7 @@ class CloudRegion:
return adapter.get_endpoint()
def _get_hardcoded_endpoint(self, service_type, constructor):
endpoint = self._get_endpoint_from_catalog(
service_type, constructor)
endpoint = self._get_endpoint_from_catalog(service_type, constructor)
if not endpoint.rstrip().rsplit('/')[-1] == 'v2.0':
if not endpoint.endswith('/'):
endpoint += '/'
@ -738,9 +811,8 @@ class CloudRegion:
return endpoint
def get_session_client(
self, service_type, version=None,
constructor=proxy.Proxy,
**kwargs):
self, service_type, version=None, constructor=proxy.Proxy, **kwargs
):
"""Return a prepped keystoneauth Adapter for a given service.
This is useful for making direct requests calls against a
@ -757,23 +829,28 @@ class CloudRegion:
version_request = self._get_version_request(service_type, version)
kwargs.setdefault('region_name', self.get_region_name(service_type))
kwargs.setdefault('connect_retries',
self.get_connect_retries(service_type))
kwargs.setdefault('status_code_retries',
self.get_status_code_retries(service_type))
kwargs.setdefault(
'connect_retries', self.get_connect_retries(service_type)
)
kwargs.setdefault(
'status_code_retries', self.get_status_code_retries(service_type)
)
kwargs.setdefault('statsd_prefix', self.get_statsd_prefix())
kwargs.setdefault('statsd_client', self.get_statsd_client())
kwargs.setdefault('prometheus_counter', self.get_prometheus_counter())
kwargs.setdefault(
'prometheus_histogram', self.get_prometheus_histogram())
'prometheus_histogram', self.get_prometheus_histogram()
)
kwargs.setdefault('influxdb_config', self._influxdb_config)
kwargs.setdefault('influxdb_client', self.get_influxdb_client())
endpoint_override = self.get_endpoint(service_type)
version = version_request.version
min_api_version = (
kwargs.pop('min_version', None) or version_request.min_api_version)
kwargs.pop('min_version', None) or version_request.min_api_version
)
max_api_version = (
kwargs.pop('max_version', None) or version_request.max_api_version)
kwargs.pop('max_version', None) or version_request.max_api_version
)
# Older neutron has inaccessible discovery document. Nobody noticed
# because neutronclient hard-codes an append of v2.0. YAY!
@ -784,7 +861,8 @@ class CloudRegion:
max_api_version = None
if endpoint_override is None:
endpoint_override = self._get_hardcoded_endpoint(
service_type, constructor)
service_type, constructor
)
client = constructor(
session=self.get_session(),
@ -798,14 +876,15 @@ class CloudRegion:
default_microversion=version_request.default_microversion,
rate_limit=self.get_rate_limit(service_type),
concurrency=self.get_concurrency(service_type),
**kwargs)
**kwargs
)
if version_request.default_microversion:
default_microversion = version_request.default_microversion
info = client.get_endpoint_data()
if not discover.version_between(
info.min_microversion,
info.max_microversion,
default_microversion
info.min_microversion,
info.max_microversion,
default_microversion,
):
if self.get_default_microversion(service_type):
raise exceptions.ConfigException(
@ -816,9 +895,13 @@ class CloudRegion:
service_type=service_type,
default_microversion=default_microversion,
min_microversion=discover.version_to_string(
info.min_microversion),
info.min_microversion
),
max_microversion=discover.version_to_string(
info.max_microversion)))
info.max_microversion
),
)
)
else:
raise exceptions.ConfigException(
"A default microversion for service {service_type} of"
@ -836,13 +919,18 @@ class CloudRegion:
api_version=self.get_api_version(service_type),
default_microversion=default_microversion,
min_microversion=discover.version_to_string(
info.min_microversion),
info.min_microversion
),
max_microversion=discover.version_to_string(
info.max_microversion)))
info.max_microversion
),
)
)
return client
def get_session_endpoint(
self, service_type, min_version=None, max_version=None):
self, service_type, min_version=None, max_version=None
):
"""Return the endpoint from config or the catalog.
If a configuration lists an explicit endpoint for a service,
@ -934,38 +1022,50 @@ class CloudRegion:
def get_external_networks(self):
"""Get list of network names for external networks."""
return [
net['name'] for net in self.config.get('networks', [])
if net['routes_externally']]
net['name']
for net in self.config.get('networks', [])
if net['routes_externally']
]
def get_external_ipv4_networks(self):
"""Get list of network names for external IPv4 networks."""
return [
net['name'] for net in self.config.get('networks', [])
if net['routes_ipv4_externally']]
net['name']
for net in self.config.get('networks', [])
if net['routes_ipv4_externally']
]
def get_external_ipv6_networks(self):
"""Get list of network names for external IPv6 networks."""
return [
net['name'] for net in self.config.get('networks', [])
if net['routes_ipv6_externally']]
net['name']
for net in self.config.get('networks', [])
if net['routes_ipv6_externally']
]
def get_internal_networks(self):
"""Get list of network names for internal networks."""
return [
net['name'] for net in self.config.get('networks', [])
if not net['routes_externally']]
net['name']
for net in self.config.get('networks', [])
if not net['routes_externally']
]
def get_internal_ipv4_networks(self):
"""Get list of network names for internal IPv4 networks."""
return [
net['name'] for net in self.config.get('networks', [])
if not net['routes_ipv4_externally']]
net['name']
for net in self.config.get('networks', [])
if not net['routes_ipv4_externally']
]
def get_internal_ipv6_networks(self):
"""Get list of network names for internal IPv6 networks."""
return [
net['name'] for net in self.config.get('networks', [])
if not net['routes_ipv6_externally']]
net['name']
for net in self.config.get('networks', [])
if not net['routes_ipv6_externally']
]
def get_default_network(self):
"""Get network used for default interactions."""
@ -999,8 +1099,8 @@ class CloudRegion:
if not key:
return defaults
return _util.merge_clouds(
defaults,
_util.normalize_keys(self._extra_config.get(key, {})))
defaults, _util.normalize_keys(self._extra_config.get(key, {}))
)
def get_client_config(self, name=None, defaults=None):
"""Get config settings for a named client.
@ -1020,25 +1120,29 @@ class CloudRegion:
client section and the defaults.
"""
return self._get_extra_config(
name, self._get_extra_config('client', defaults))
name, self._get_extra_config('client', defaults)
)
def get_password_callback(self):
return self._password_callback
def get_rate_limit(self, service_type=None):
return self._get_service_config(
'rate_limit', service_type=service_type)
'rate_limit', service_type=service_type
)
def get_concurrency(self, service_type=None):
return self._get_service_config(
'concurrency', service_type=service_type)
'concurrency', service_type=service_type
)
def get_statsd_client(self):
if not statsd:
if self._statsd_host:
self.log.warning(
'StatsD python library is not available. '
'Reporting disabled')
'Reporting disabled'
)
return None
statsd_args = {}
if self._statsd_host:
@ -1075,7 +1179,10 @@ class CloudRegion:
'openstack_http_response_time',
'Time taken for an http response to an OpenStack service',
labelnames=[
'method', 'endpoint', 'service_type', 'status_code'
'method',
'endpoint',
'service_type',
'status_code',
],
registry=registry,
)
@ -1092,7 +1199,10 @@ class CloudRegion:
'openstack_http_requests',
'Number of HTTP requests made to an OpenStack service',
labelnames=[
'method', 'endpoint', 'service_type', 'status_code'
'method',
'endpoint',
'service_type',
'status_code',
],
registry=registry,
)
@ -1103,7 +1213,8 @@ class CloudRegion:
service_type = service_type.lower().replace('-', '_')
key = 'has_{service_type}'.format(service_type=service_type)
return self.config.get(
key, self._service_type_manager.is_official(service_type))
key, self._service_type_manager.is_official(service_type)
)
def disable_service(self, service_type, reason=None):
_disable_service(self.config, service_type, reason=reason)
@ -1140,6 +1251,8 @@ class CloudRegion:
except Exception:
self.log.warning('Cannot establish connection to InfluxDB')
else:
self.log.warning('InfluxDB configuration is present, '
'but no client library is found.')
self.log.warning(
'InfluxDB configuration is present, '
'but no client library is found.'
)
return None

View File

@ -17,7 +17,8 @@ import os
import threading
_json_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'defaults.json')
os.path.dirname(os.path.realpath(__file__)), 'defaults.json'
)
_defaults = None
_defaults_lock = threading.Lock()

View File

@ -46,19 +46,23 @@ CACHE_PATH = APPDIRS.user_cache_dir
# see https://snapcraft.io/docs/environment-variables
SNAP_REAL_HOME = os.getenv('SNAP_REAL_HOME')
if SNAP_REAL_HOME:
UNIX_CONFIG_HOME = os.path.join(os.path.join(SNAP_REAL_HOME, '.config'),
'openstack')
UNIX_CONFIG_HOME = os.path.join(
os.path.join(SNAP_REAL_HOME, '.config'), 'openstack'
)
else:
UNIX_CONFIG_HOME = os.path.join(
os.path.expanduser(os.path.join('~', '.config')), 'openstack')
os.path.expanduser(os.path.join('~', '.config')), 'openstack'
)
UNIX_SITE_CONFIG_HOME = '/etc/openstack'
SITE_CONFIG_HOME = APPDIRS.site_config_dir
CONFIG_SEARCH_PATH = [
os.getcwd(),
CONFIG_HOME, UNIX_CONFIG_HOME,
SITE_CONFIG_HOME, UNIX_SITE_CONFIG_HOME
CONFIG_HOME,
UNIX_CONFIG_HOME,
SITE_CONFIG_HOME,
UNIX_SITE_CONFIG_HOME,
]
YAML_SUFFIXES = ('.yaml', '.yml')
JSON_SUFFIXES = ('.json',)
@ -134,8 +138,8 @@ def _fix_argv(argv):
"The following options were given: '{options}' which contain"
" duplicates except that one has _ and one has -. There is"
" no sane way for us to know what you're doing. Remove the"
" duplicate option and try again".format(
options=','.join(overlap)))
" duplicate option and try again".format(options=','.join(overlap))
)
class OpenStackConfig:
@ -146,14 +150,25 @@ class OpenStackConfig:
_cloud_region_class = cloud_region.CloudRegion
_defaults_module = defaults
def __init__(self, config_files=None, vendor_files=None,
override_defaults=None, force_ipv4=None,
envvar_prefix=None, secure_files=None,
pw_func=None, session_constructor=None,
app_name=None, app_version=None,
load_yaml_config=True, load_envvars=True,
statsd_host=None, statsd_port=None,
statsd_prefix=None, influxdb_config=None):
def __init__(
self,
config_files=None,
vendor_files=None,
override_defaults=None,
force_ipv4=None,
envvar_prefix=None,
secure_files=None,
pw_func=None,
session_constructor=None,
app_name=None,
app_version=None,
load_yaml_config=True,
load_envvars=True,
statsd_host=None,
statsd_port=None,
statsd_prefix=None,
influxdb_config=None,
):
self.log = _log.setup_logging('openstack.config')
self._session_constructor = session_constructor
self._app_name = app_name
@ -196,7 +211,8 @@ class OpenStackConfig:
_, secure_config = self._load_secure_file()
if secure_config:
self.cloud_config = _util.merge_clouds(
self.cloud_config, secure_config)
self.cloud_config, secure_config
)
if not self.cloud_config:
self.cloud_config = {'clouds': {}}
@ -217,14 +233,20 @@ class OpenStackConfig:
# Get the backwards compat value
prefer_ipv6 = get_boolean(
self._get_envvar(
'OS_PREFER_IPV6', client_config.get(
'prefer_ipv6', client_config.get(
'prefer-ipv6', True))))
'OS_PREFER_IPV6',
client_config.get(
'prefer_ipv6', client_config.get('prefer-ipv6', True)
),
)
)
force_ipv4 = get_boolean(
self._get_envvar(
'OS_FORCE_IPV4', client_config.get(
'force_ipv4', client_config.get(
'broken-ipv6', False))))
'OS_FORCE_IPV4',
client_config.get(
'force_ipv4', client_config.get('broken-ipv6', False)
),
)
)
self.force_ipv4 = force_ipv4
if not prefer_ipv6:
@ -239,8 +261,10 @@ class OpenStackConfig:
'"{0}" defines a cloud named "{1}", but'
' OS_CLOUD_NAME is also set to "{1}". Please rename'
' either your environment based cloud, or one of your'
' file-based clouds.'.format(self.config_filename,
self.envvar_key))
' file-based clouds.'.format(
self.config_filename, self.envvar_key
)
)
self.default_cloud = self._get_envvar('OS_CLOUD')
@ -259,15 +283,15 @@ class OpenStackConfig:
# clouds.yaml.
# The next/iter thing is for python3 compat where dict.keys
# returns an iterator but in python2 it's a list.
self.default_cloud = next(iter(
self.cloud_config['clouds'].keys()))
self.default_cloud = next(
iter(self.cloud_config['clouds'].keys())
)
# Finally, fall through and make a cloud that starts with defaults
# because we need somewhere to put arguments, and there are neither
# config files or env vars
if not self.cloud_config['clouds']:
self.cloud_config = dict(
clouds=dict(defaults=dict(self.defaults)))
self.cloud_config = dict(clouds=dict(defaults=dict(self.defaults)))
self.default_cloud = 'defaults'
self._cache_auth = False
@ -281,13 +305,15 @@ class OpenStackConfig:
cache_settings = _util.normalize_keys(self.cloud_config['cache'])
self._cache_auth = get_boolean(
cache_settings.get('auth', self._cache_auth))
cache_settings.get('auth', self._cache_auth)
)
# expiration_time used to be 'max_age' but the dogpile setting
# is expiration_time. Support max_age for backwards compat.
self._cache_expiration_time = cache_settings.get(
'expiration_time', cache_settings.get(
'max_age', self._cache_expiration_time))
'expiration_time',
cache_settings.get('max_age', self._cache_expiration_time),
)
# If cache class is given, use that. If not, but if cache time
# is given, default to memory. Otherwise, default to nothing.
@ -295,14 +321,18 @@ class OpenStackConfig:
if self._cache_expiration_time:
self._cache_class = 'dogpile.cache.memory'
self._cache_class = self.cloud_config['cache'].get(
'class', self._cache_class)
'class', self._cache_class
)
self._cache_path = os.path.expanduser(
cache_settings.get('path', self._cache_path))
cache_settings.get('path', self._cache_path)
)
self._cache_arguments = cache_settings.get(
'arguments', self._cache_arguments)
'arguments', self._cache_arguments
)
self._cache_expirations = cache_settings.get(
'expiration', self._cache_expirations)
'expiration', self._cache_expirations
)
if load_yaml_config:
metrics_config = self.cloud_config.get('metrics', {})
@ -326,12 +356,21 @@ class OpenStackConfig:
use_udp = use_udp.lower() in ('true', 'yes', '1')
elif not isinstance(use_udp, bool):
use_udp = False
self.log.warning('InfluxDB.use_udp value type is not '
'supported. Use one of '
'[true|false|yes|no|1|0]')
self.log.warning(
'InfluxDB.use_udp value type is not '
'supported. Use one of '
'[true|false|yes|no|1|0]'
)
config['use_udp'] = use_udp
for key in ['host', 'port', 'username', 'password', 'database',
'measurement', 'timeout']:
for key in [
'host',
'port',
'username',
'password',
'database',
'measurement',
'timeout',
]:
if key in influxdb_config:
config[key] = influxdb_config[key]
self._influxdb_config = config
@ -357,20 +396,28 @@ class OpenStackConfig:
if not envvar_prefix:
# This makes the or below be OS_ or OS_ which is a no-op
envvar_prefix = 'OS_'
environkeys = [k for k in os.environ.keys()
if (k.startswith('OS_') or k.startswith(envvar_prefix))
and not k.startswith('OS_TEST') # infra CI var
and not k.startswith('OS_STD') # oslotest var
and not k.startswith('OS_LOG') # oslotest var
]
environkeys = [
k
for k in os.environ.keys()
if (k.startswith('OS_') or k.startswith(envvar_prefix))
and not k.startswith('OS_TEST') # infra CI var
and not k.startswith('OS_STD') # oslotest var
and not k.startswith('OS_LOG') # oslotest var
]
for k in environkeys:
newkey = k.split('_', 1)[-1].lower()
ret[newkey] = os.environ[k]
# If the only environ keys are selectors or behavior modification,
# don't return anything
selectors = set([
'OS_CLOUD', 'OS_REGION_NAME',
'OS_CLIENT_CONFIG_FILE', 'OS_CLIENT_SECURE_FILE', 'OS_CLOUD_NAME'])
selectors = set(
[
'OS_CLOUD',
'OS_REGION_NAME',
'OS_CLIENT_CONFIG_FILE',
'OS_CLIENT_SECURE_FILE',
'OS_CLOUD_NAME',
]
)
if set(environkeys) - selectors:
return ret
return None
@ -391,8 +438,8 @@ class OpenStackConfig:
if not key:
return defaults
return _util.merge_clouds(
defaults,
_util.normalize_keys(self.cloud_config.get(key, {})))
defaults, _util.normalize_keys(self.cloud_config.get(key, {}))
)
def _load_config_file(self):
return self._load_yaml_json_file(self._config_files)
@ -427,10 +474,12 @@ class OpenStackConfig:
for region in regions:
if isinstance(region, dict):
# i.e. must have name key, and only name,values keys
if 'name' not in region or \
not {'name', 'values'} >= set(region):
if 'name' not in region or not {'name', 'values'} >= set(
region
):
raise exceptions.ConfigException(
'Invalid region entry at: %s' % region)
'Invalid region entry at: %s' % region
)
if 'values' not in region:
region['values'] = {}
ret.append(copy.deepcopy(region))
@ -460,7 +509,8 @@ class OpenStackConfig:
warnings.warn(
"Comma separated lists in region_name are deprecated."
" Please use a yaml list in the regions"
" parameter in {0} instead.".format(self.config_filename))
" parameter in {0} instead.".format(self.config_filename)
)
return self._expand_regions(regions)
else:
# crappit. we don't have a region defined.
@ -495,7 +545,9 @@ class OpenStackConfig:
' region names are case sensitive.'.format(
region_name=region_name,
region_list=','.join([r['name'] for r in regions]),
cloud=cloud))
cloud=cloud,
)
)
def get_cloud_names(self):
return self.cloud_config['clouds'].keys()
@ -506,8 +558,8 @@ class OpenStackConfig:
# Only validate cloud name if one was given
if name and name not in self.cloud_config['clouds']:
raise exceptions.ConfigException(
"Cloud {name} was not found.".format(
name=name))
"Cloud {name} was not found.".format(name=name)
)
our_cloud = self.cloud_config['clouds'].get(name, dict())
if profile:
@ -536,11 +588,15 @@ class OpenStackConfig:
warnings.warn(
"{0} uses the keyword 'cloud' to reference a known "
"vendor profile. This has been deprecated in favor of the "
"'profile' keyword.".format(self.config_filename))
"'profile' keyword.".format(self.config_filename)
)
vendor_filename, vendor_file = self._load_vendor_file()
if (vendor_file and 'public-clouds' in vendor_file
and profile_name in vendor_file['public-clouds']):
if (
vendor_file
and 'public-clouds' in vendor_file
and profile_name in vendor_file['public-clouds']
):
_auth_update(cloud, vendor_file['public-clouds'][profile_name])
else:
profile_data = vendors.get_profile(profile_name)
@ -555,23 +611,31 @@ class OpenStackConfig:
if status == 'deprecated':
warnings.warn(
"{profile_name} is deprecated: {message}".format(
profile_name=profile_name, message=message))
profile_name=profile_name, message=message
)
)
elif status == 'shutdown':
raise exceptions.ConfigException(
"{profile_name} references a cloud that no longer"
" exists: {message}".format(
profile_name=profile_name, message=message))
profile_name=profile_name, message=message
)
)
_auth_update(cloud, profile_data)
else:
# Can't find the requested vendor config, go about business
warnings.warn("Couldn't find the vendor profile '{0}', for"
" the cloud '{1}'".format(profile_name,
name))
warnings.warn(
"Couldn't find the vendor profile '{0}', for"
" the cloud '{1}'".format(profile_name, name)
)
def _project_scoped(self, cloud):
return ('project_id' in cloud or 'project_name' in cloud
or 'project_id' in cloud['auth']
or 'project_name' in cloud['auth'])
return (
'project_id' in cloud
or 'project_name' in cloud
or 'project_id' in cloud['auth']
or 'project_name' in cloud['auth']
)
def _validate_networks(self, networks, key):
value = None
@ -580,9 +644,9 @@ class OpenStackConfig:
raise exceptions.ConfigException(
"Duplicate network entries for {key}: {net1} and {net2}."
" Only one network can be flagged with {key}".format(
key=key,
net1=value['name'],
net2=net['name']))
key=key, net1=value['name'], net2=net['name']
)
)
if not value and net[key]:
value = net
@ -595,7 +659,8 @@ class OpenStackConfig:
name = net.get('name')
if not name:
raise exceptions.ConfigException(
'Entry in network list is missing required field "name".')
'Entry in network list is missing required field "name".'
)
network = dict(
name=name,
routes_externally=get_boolean(net.get('routes_externally')),
@ -605,12 +670,12 @@ class OpenStackConfig:
)
# routes_ipv4_externally defaults to the value of routes_externally
network['routes_ipv4_externally'] = get_boolean(
net.get(
'routes_ipv4_externally', network['routes_externally']))
net.get('routes_ipv4_externally', network['routes_externally'])
)
# routes_ipv6_externally defaults to the value of routes_externally
network['routes_ipv6_externally'] = get_boolean(
net.get(
'routes_ipv6_externally', network['routes_externally']))
net.get('routes_ipv6_externally', network['routes_externally'])
)
networks.append(network)
for key in ('external_network', 'internal_network'):
@ -619,18 +684,24 @@ class OpenStackConfig:
raise exceptions.ConfigException(
"Both {key} and networks were specified in the config."
" Please remove {key} from the config and use the network"
" list to configure network behavior.".format(key=key))
" list to configure network behavior.".format(key=key)
)
if key in cloud:
warnings.warn(
"{key} is deprecated. Please replace with an entry in"
" a dict inside of the networks list with name: {name}"
" and routes_externally: {external}".format(
key=key, name=cloud[key], external=external))
networks.append(dict(
name=cloud[key],
routes_externally=external,
nat_destination=not external,
default_interface=external))
key=key, name=cloud[key], external=external
)
)
networks.append(
dict(
name=cloud[key],
routes_externally=external,
nat_destination=not external,
default_interface=external,
)
)
# Validate that we don't have duplicates
self._validate_networks(networks, 'nat_destination')
@ -668,7 +739,9 @@ class OpenStackConfig:
'user_domain_name': ('user_domain_name', 'user-domain-name'),
'project_domain_id': ('project_domain_id', 'project-domain-id'),
'project_domain_name': (
'project_domain_name', 'project-domain-name'),
'project_domain_name',
'project-domain-name',
),
'token': ('auth-token', 'auth_token', 'token'),
}
if cloud.get('auth_type', None) == 'v2password':
@ -676,14 +749,30 @@ class OpenStackConfig:
# clouds. That's fine - we need to map settings in the opposite
# direction
mappings['tenant_id'] = (
'project_id', 'project-id', 'tenant_id', 'tenant-id')
'project_id',
'project-id',
'tenant_id',
'tenant-id',
)
mappings['tenant_name'] = (
'project_name', 'project-name', 'tenant_name', 'tenant-name')
'project_name',
'project-name',
'tenant_name',
'tenant-name',
)
else:
mappings['project_id'] = (
'tenant_id', 'tenant-id', 'project_id', 'project-id')
'tenant_id',
'tenant-id',
'project_id',
'project-id',
)
mappings['project_name'] = (
'tenant_name', 'tenant-name', 'project_name', 'project-name')
'tenant_name',
'tenant-name',
'project_name',
'project-name',
)
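The mappings above normalize keystone v2 tenant_* spellings and v3 project_* spellings in whichever direction the chosen auth plugin needs. A rough sketch of what the loop that follows does, assuming a v3-style cloud and hypothetical values:

auth = {'tenant_name': 'demo', 'auth_url': 'https://keystone.example.com/v3'}
mappings = {
    'project_name': ('tenant_name', 'tenant-name', 'project_name', 'project-name'),
}
for target_key, candidates in mappings.items():
    for key in candidates:
        if key in auth:
            auth[target_key] = auth.pop(key)
# auth is now {'project_name': 'demo', 'auth_url': ...}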
for target_key, possible_values in mappings.items():
target = None
for key in possible_values:
@ -747,7 +836,8 @@ class OpenStackConfig:
'--os-cloud',
metavar='<name>',
default=self._get_envvar('OS_CLOUD', None),
help='Named cloud to connect to')
help='Named cloud to connect to',
)
# we need to peek to see if timeout was actually passed, since
# the keystoneauth declaration of it has a default, which means
@ -782,7 +872,8 @@ class OpenStackConfig:
try:
loading.register_auth_argparse_arguments(
parser, argv, default=default_auth_type)
parser, argv, default=default_auth_type
)
except Exception:
# Hiding the keystoneauth exception because we're not actually
# loading the auth plugin at this point, so the error message
@ -793,7 +884,9 @@ class OpenStackConfig:
"An invalid auth-type was specified: {auth_type}."
" Valid choices are: {plugin_names}.".format(
auth_type=options.os_auth_type,
plugin_names=",".join(plugin_names)))
plugin_names=",".join(plugin_names),
)
)
if service_keys:
primary_service = service_keys[0]
@ -801,15 +894,19 @@ class OpenStackConfig:
primary_service = None
loading.register_session_argparse_arguments(parser)
adapter.register_adapter_argparse_arguments(
parser, service_type=primary_service)
parser, service_type=primary_service
)
for service_key in service_keys:
# legacy clients have un-prefixed api-version options
parser.add_argument(
'--{service_key}-api-version'.format(
service_key=service_key.replace('_', '-')),
help=argparse_mod.SUPPRESS)
service_key=service_key.replace('_', '-')
),
help=argparse_mod.SUPPRESS,
)
adapter.register_service_adapter_argparse_arguments(
parser, service_type=service_key)
parser, service_type=service_key
)
# Backwards compat options for legacy clients
parser.add_argument('--http-timeout', help=argparse_mod.SUPPRESS)
@ -837,7 +934,8 @@ class OpenStackConfig:
service_timeout = None
for key in cloud.keys():
if key.endswith('timeout') and not (
key == 'timeout' or key == 'api_timeout'):
key == 'timeout' or key == 'api_timeout'
):
service_timeout = cloud[key]
else:
new_cloud[key] = cloud[key]
@ -857,9 +955,11 @@ class OpenStackConfig:
for cloud in self.get_cloud_names():
for region in self._get_regions(cloud):
if region:
clouds.append(self.get_one(
cloud, region_name=region['name']))
clouds.append(
self.get_one(cloud, region_name=region['name'])
)
return clouds
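A hedged usage sketch for get_all above, mirroring how the functional tests later in this change construct a config object; the cloud and region names come from the local clouds.yaml:

import openstack.config

for cloud_region in openstack.config.OpenStackConfig().get_all():
    print(cloud_region.name, cloud_region.region)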
# TODO(mordred) Backwards compat for OSC transition
get_all_clouds = get_all
@ -904,8 +1004,9 @@ class OpenStackConfig:
if opt_name in config:
return config[opt_name]
else:
deprecated = getattr(opt, 'deprecated', getattr(
opt, 'deprecated_opts', []))
deprecated = getattr(
opt, 'deprecated', getattr(opt, 'deprecated_opts', [])
)
for d_opt in deprecated:
d_opt_name = d_opt.name.replace('-', '_')
if d_opt_name in config:
@ -1027,9 +1128,9 @@ class OpenStackConfig:
def option_prompt(self, config, p_opt):
"""Prompt user for option that requires a value"""
if (
getattr(p_opt, 'prompt', None) is not None
and p_opt.dest not in config['auth']
and self._pw_callback is not None
getattr(p_opt, 'prompt', None) is not None
and p_opt.dest not in config['auth']
and self._pw_callback is not None
):
config['auth'][p_opt.dest] = self._pw_callback(p_opt.prompt)
return config
@ -1046,8 +1147,7 @@ class OpenStackConfig:
# Prefer the plugin configuration dest value if the value's key
# is marked as deprecated.
if p_opt.dest is None:
config['auth'][p_opt.name.replace('-', '_')] = (
winning_value)
config['auth'][p_opt.name.replace('-', '_')] = winning_value
else:
config['auth'][p_opt.dest] = winning_value
return config
@ -1056,9 +1156,11 @@ class OpenStackConfig:
"""Perform the set of magic argument fixups"""
# Infer token plugin if a token was given
if (('auth' in config and 'token' in config['auth'])
or ('auth_token' in config and config['auth_token'])
or ('token' in config and config['token'])):
if (
('auth' in config and 'token' in config['auth'])
or ('auth_token' in config and config['auth_token'])
or ('token' in config and config['token'])
):
config.setdefault('token', config.pop('auth_token', None))
# Infer passcode if it was given separately
@ -1094,12 +1196,12 @@ class OpenStackConfig:
# more generalized
if 'auth' in config and 'auth_url' in config['auth']:
config['auth']['auth_url'] = config['auth']['auth_url'].format(
**config)
**config
)
return config
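The brace-expansion above lets auth_url reference other config values. A tiny sketch with hypothetical values:

config = {
    'region_name': 'RegionOne',
    'auth': {'auth_url': 'https://keystone.{region_name}.example.com/v3'},
}
config['auth']['auth_url'] = config['auth']['auth_url'].format(**config)
# -> 'https://keystone.RegionOne.example.com/v3'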
def get_one(
self, cloud=None, validate=True, argparse=None, **kwargs):
def get_one(self, cloud=None, validate=True, argparse=None, **kwargs):
"""Retrieve a single CloudRegion and merge additional options
:param string cloud:
@ -1217,15 +1319,12 @@ class OpenStackConfig:
statsd_prefix=statsd_prefix,
influxdb_config=influxdb_config,
)
# TODO(mordred) Backwards compat for OSC transition
get_one_cloud = get_one
def get_one_cloud_osc(
self,
cloud=None,
validate=True,
argparse=None,
**kwargs
self, cloud=None, validate=True, argparse=None, **kwargs
):
"""Retrieve a single CloudRegion and merge additional options
@ -1359,10 +1458,10 @@ if __name__ == '__main__':
if len(sys.argv) == 1:
print_cloud = True
elif len(sys.argv) == 3 and (
sys.argv[1] == cloud.name and sys.argv[2] == cloud.region):
sys.argv[1] == cloud.name and sys.argv[2] == cloud.region
):
print_cloud = True
elif len(sys.argv) == 2 and (
sys.argv[1] == cloud.name):
elif len(sys.argv) == 2 and (sys.argv[1] == cloud.name):
print_cloud = True
if print_cloud:


@ -61,7 +61,9 @@ def get_profile(profile_name):
" {status_code} {reason}".format(
profile_name=profile_name,
status_code=response.status_code,
reason=response.reason))
reason=response.reason,
)
)
vendor_defaults[profile_name] = None
return
vendor_data = response.json()
@ -69,8 +71,8 @@ def get_profile(profile_name):
# Merge named and url cloud config, but make named config override the
# config from the cloud so that we can supply local overrides if needed.
profile = _util.merge_clouds(
vendor_data['profile'],
vendor_defaults.get(name, {}))
vendor_data['profile'], vendor_defaults.get(name, {})
)
# If there is (or was) a profile listed in a named config profile, it
# might still be here. We just merged in content from a URL though, so
# pop the key to prevent doing it again in the future.
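The merge described above gives the locally named config precedence over what the vendor URL returned. A hedged sketch of that precedence using a hand-rolled recursive merge (the SDK uses _util.merge_clouds, whose exact semantics are not shown in this diff):

def merge(defaults, overrides):
    result = dict(defaults)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = merge(result[key], value)
        else:
            result[key] = value
    return result

profile = merge(
    {'auth': {'auth_url': 'https://identity.example.com'}},  # from the URL
    {'auth': {'auth_url': 'https://local-override.example.com'}},  # named config
)
# the named-config value wins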


@ -220,7 +220,8 @@ __all__ = [
if requestsexceptions.SubjectAltNameWarning:
warnings.filterwarnings(
'ignore', category=requestsexceptions.SubjectAltNameWarning)
'ignore', category=requestsexceptions.SubjectAltNameWarning
)
_logger = _log.setup_logging('openstack')
@ -249,7 +250,8 @@ def from_config(cloud=None, config=None, options=None, **kwargs):
config = kwargs.pop('cloud_config', config)
if config is None:
config = _config.OpenStackConfig().get_one(
cloud=cloud, argparse=options, **kwargs)
cloud=cloud, argparse=options, **kwargs
)
return Connection(config=config)
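A hedged usage sketch for from_config above, assuming a cloud named 'mycloud' is defined in the local clouds.yaml:

from openstack import connection

conn = connection.from_config(cloud='mycloud')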
@ -274,20 +276,25 @@ class Connection(
_security_group.SecurityGroupCloudMixin,
_shared_file_system.SharedFileSystemCloudMixin,
):
def __init__(self, cloud=None, config=None, session=None,
app_name=None, app_version=None,
extra_services=None,
strict=False,
use_direct_get=False,
task_manager=None,
rate_limit=None,
oslo_conf=None,
service_types=None,
global_request_id=None,
strict_proxies=False,
pool_executor=None,
**kwargs):
def __init__(
self,
cloud=None,
config=None,
session=None,
app_name=None,
app_version=None,
extra_services=None,
strict=False,
use_direct_get=False,
task_manager=None,
rate_limit=None,
oslo_conf=None,
service_types=None,
global_request_id=None,
strict_proxies=False,
pool_executor=None,
**kwargs
):
"""Create a connection to a cloud.
A connection needs information about how to connect, how to
@ -373,24 +380,32 @@ class Connection(
if not self.config:
if oslo_conf:
self.config = cloud_region.from_conf(
oslo_conf, session=session, app_name=app_name,
app_version=app_version, service_types=service_types)
oslo_conf,
session=session,
app_name=app_name,
app_version=app_version,
service_types=service_types,
)
elif session:
self.config = cloud_region.from_session(
session=session,
app_name=app_name, app_version=app_version,
app_name=app_name,
app_version=app_version,
load_yaml_config=False,
load_envvars=False,
rate_limit=rate_limit,
**kwargs)
**kwargs
)
else:
self.config = _config.get_cloud_region(
cloud=cloud,
app_name=app_name, app_version=app_version,
app_name=app_name,
app_version=app_version,
load_yaml_config=cloud is not None,
load_envvars=cloud is not None,
rate_limit=rate_limit,
**kwargs)
**kwargs
)
self._session = None
self._proxies = {}
@ -440,19 +455,25 @@ class Connection(
hook = ep.load()
hook(self)
except ValueError:
self.log.warning('Hook should be in the entrypoint '
'module:attribute format')
self.log.warning(
'Hook should be in the entrypoint '
'module:attribute format'
)
except (ImportError, TypeError, AttributeError) as e:
self.log.warning('Configured hook %s cannot be executed: %s',
vendor_hook, e)
self.log.warning(
'Configured hook %s cannot be executed: %s', vendor_hook, e
)
# Add additional metrics into the configuration according to the
# selected connection. We don't want to deal with overall config in the
# proxy, just pass required part.
if (self.config._influxdb_config
and 'additional_metric_tags' in self.config.config):
self.config._influxdb_config['additional_metric_tags'] = \
self.config.config['additional_metric_tags']
if (
self.config._influxdb_config
and 'additional_metric_tags' in self.config.config
):
self.config._influxdb_config[
'additional_metric_tags'
] = self.config.config['additional_metric_tags']
def __del__(self):
# try to force release of resources and save authorization
@ -500,7 +521,7 @@ class Connection(
setattr(
self.__class__,
attr_name.replace('-', '_'),
property(fget=getter)
property(fget=getter),
)
self.config.enable_service(service.service_type)
@ -527,7 +548,8 @@ class Connection(
def _pool_executor(self):
if not self.__pool_executor:
self.__pool_executor = concurrent.futures.ThreadPoolExecutor(
max_workers=5)
max_workers=5
)
return self.__pool_executor
def close(self):


@ -24,6 +24,7 @@ from requests import exceptions as _rex
class SDKException(Exception):
"""The base exception class for all exceptions this library raises."""
def __init__(self, message=None, extra_data=None):
self.message = self.__class__.__name__ if message is None else message
self.extra_data = extra_data
@ -35,6 +36,7 @@ OpenStackCloudException = SDKException
class EndpointNotFound(SDKException):
"""A mismatch occurred between what the client and server expect."""
def __init__(self, message=None):
super(EndpointNotFound, self).__init__(message)
@ -55,20 +57,25 @@ class InvalidRequest(SDKException):
class HttpException(SDKException, _rex.HTTPError):
def __init__(self, message='Error', response=None,
http_status=None,
details=None, request_id=None):
def __init__(
self,
message='Error',
response=None,
http_status=None,
details=None,
request_id=None,
):
# TODO(shade) Remove http_status parameter and the ability for response
# to be None once we're not mocking Session everywhere.
if not message:
if response is not None:
message = "{name}: {code}".format(
name=self.__class__.__name__,
code=response.status_code)
name=self.__class__.__name__, code=response.status_code
)
else:
message = "{name}: Unknown error".format(
name=self.__class__.__name__)
name=self.__class__.__name__
)
# Call directly rather than via super to control parameters
SDKException.__init__(self, message=message)
@ -96,7 +103,8 @@ class HttpException(SDKException, _rex.HTTPError):
return self.message
if self.url:
remote_error = "{source} Error for url: {url}".format(
source=self.source, url=self.url)
source=self.source, url=self.url
)
if self.details:
remote_error += ', '
if self.details:
@ -104,31 +112,37 @@ class HttpException(SDKException, _rex.HTTPError):
return "{message}: {remote_error}".format(
message=super(HttpException, self).__str__(),
remote_error=remote_error)
remote_error=remote_error,
)
class BadRequestException(HttpException):
"""HTTP 400 Bad Request."""
pass
class ForbiddenException(HttpException):
"""HTTP 403 Forbidden Request."""
pass
class ConflictException(HttpException):
"""HTTP 409 Conflict."""
pass
class PreconditionFailedException(HttpException):
"""HTTP 412 Precondition Failed."""
pass
class MethodNotSupported(SDKException):
"""The resource does not support this operation type."""
def __init__(self, resource, method):
# This needs to work with both classes and instances.
try:
@ -136,18 +150,23 @@ class MethodNotSupported(SDKException):
except AttributeError:
name = resource.__class__.__name__
message = ('The %s method is not supported for %s.%s' %
(method, resource.__module__, name))
message = 'The %s method is not supported for %s.%s' % (
method,
resource.__module__,
name,
)
super(MethodNotSupported, self).__init__(message=message)
class DuplicateResource(SDKException):
"""More than one resource exists with that name."""
pass
class ResourceNotFound(HttpException):
"""No resource exists with that name or id."""
pass
@ -156,16 +175,19 @@ NotFoundException = ResourceNotFound
class ResourceTimeout(SDKException):
"""Timeout waiting for resource."""
pass
class ResourceFailure(SDKException):
"""General resource failure."""
pass
class InvalidResourceQuery(SDKException):
"""Invalid query params for resource."""
pass
@ -225,8 +247,9 @@ def raise_from_response(response, error_message=None):
details = response.text
elif response.content and 'text/html' in content_type:
# Split the lines, strip whitespace and inline HTML from the response.
details = [re.sub(r'<.+?>', '', i.strip())
for i in response.text.splitlines()]
details = [
re.sub(r'<.+?>', '', i.strip()) for i in response.text.splitlines()
]
details = list(set([msg for msg in details if msg]))
# Return joined string separated by colons.
details = ': '.join(details)
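A small sketch of the HTML-stripping above on a hypothetical error body (because of the set(), the joined order is unspecified):

import re

text = '<html>\n <h1>404 Not Found</h1>\n <p>No such image</p>\n</html>'
details = [re.sub(r'<.+?>', '', line.strip()) for line in text.splitlines()]
details = ': '.join(set(msg for msg in details if msg))
# e.g. '404 Not Found: No such image'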
@ -238,8 +261,11 @@ def raise_from_response(response, error_message=None):
request_id = response.headers.get('x-openstack-request-id')
raise cls(
message=error_message, response=response, details=details,
http_status=http_status, request_id=request_id
message=error_message,
response=response,
details=details,
http_status=http_status,
request_id=request_id,
)
@ -249,6 +275,7 @@ class UnsupportedServiceVersion(Warning):
class ArgumentDeprecationWarning(Warning):
"""A deprecated argument has been provided."""
pass
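A hedged sketch of constructing the HttpException defined above without a live response object; the exact string form depends on whether a URL is attached:

from openstack import exceptions

exc = exceptions.HttpException(message='Error', details='quota exceeded')
print(str(exc))  # prints just the message when no URL/response is attached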


@ -61,7 +61,8 @@ class ConnectionFixture(fixtures.Fixture):
templates = {}
for k, v in self._endpoint_templates.items():
suffix = self._suffixes.get(
alias, self._suffixes.get(service_type, ''))
alias, self._suffixes.get(service_type, '')
)
# For a keystone v2 catalog, we want to list the
# versioned endpoint in the catalog, because that's
# more likely how those were deployed.
@ -88,10 +89,8 @@ class ConnectionFixture(fixtures.Fixture):
continue
service_name = service['project']
ets = self._get_endpoint_templates(service_type)
v3_svc = self.v3_token.add_service(
service_type, name=service_name)
v2_svc = self.v2_token.add_service(
service_type, name=service_name)
v3_svc = self.v3_token.add_service(service_type, name=service_name)
v2_svc = self.v2_token.add_service(service_type, name=service_name)
v3_svc.add_standard_endpoints(region='RegionOne', **ets)
if service_type == 'identity':
ets = self._get_endpoint_templates(service_type, v2=True)


@ -12,7 +12,6 @@
class Formatter:
@classmethod
def serialize(cls, value):
"""Return a string representing the formatted value"""
@ -25,7 +24,6 @@ class Formatter:
class BoolStr(Formatter):
@classmethod
def deserialize(cls, value):
"""Convert a boolean string to a boolean"""
@ -35,8 +33,9 @@ class BoolStr(Formatter):
elif "false" == expr:
return False
else:
raise ValueError("Unable to deserialize boolean string: %s"
% value)
raise ValueError(
"Unable to deserialize boolean string: %s" % value
)
@classmethod
def serialize(cls, value):
@ -47,5 +46,4 @@ class BoolStr(Formatter):
else:
return "false"
else:
raise ValueError("Unable to serialize boolean string: %s"
% value)
raise ValueError("Unable to serialize boolean string: %s" % value)
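A short usage sketch for the BoolStr formatter above; the 'true' comparison branch is implied by the visible elif:

BoolStr.deserialize('true')    # -> True
BoolStr.serialize(False)       # -> 'false'
BoolStr.deserialize('maybe')   # raises ValueError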


@ -437,8 +437,7 @@ class Proxy(adapter.Adapter, Generic[T]):
self, '_connection', getattr(self.session, '_sdk_connection', None)
)
def _get_resource(self, resource_type: Type[T], value,
**attrs) -> T:
def _get_resource(self, resource_type: Type[T], value, **attrs) -> T:
"""Get a resource object to work on
:param resource_type: The type of resource to operate on. This should
@ -484,8 +483,9 @@ class Proxy(adapter.Adapter, Generic[T]):
value = resource.Resource._get_id(parent)
return value
def _find(self, resource_type: Type[T], name_or_id, ignore_missing=True,
**attrs) -> Optional[T]:
def _find(
self, resource_type: Type[T], name_or_id, ignore_missing=True, **attrs
) -> Optional[T]:
"""Find a resource
:param name_or_id: The name or ID of a resource to find.
@ -505,8 +505,9 @@ class Proxy(adapter.Adapter, Generic[T]):
)
@_check_resource(strict=False)
def _delete(self, resource_type: Type[T], value, ignore_missing=True,
**attrs):
def _delete(
self, resource_type: Type[T], value, ignore_missing=True, **attrs
):
"""Delete a resource
:param resource_type: The type of resource to delete. This should
@ -542,8 +543,9 @@ class Proxy(adapter.Adapter, Generic[T]):
return rv
@_check_resource(strict=False)
def _update(self, resource_type: Type[T], value, base_path=None,
**attrs) -> T:
def _update(
self, resource_type: Type[T], value, base_path=None, **attrs
) -> T:
"""Update a resource
:param resource_type: The type of resource to update.
@ -591,8 +593,9 @@ class Proxy(adapter.Adapter, Generic[T]):
res = resource_type.new(connection=conn, **attrs)
return res.create(self, base_path=base_path)
def _bulk_create(self, resource_type: Type[T], data, base_path=None
) -> Generator[T, None, None]:
def _bulk_create(
self, resource_type: Type[T], data, base_path=None
) -> Generator[T, None, None]:
"""Create a resource from attributes
:param resource_type: The type of resource to create.
@ -614,13 +617,13 @@ class Proxy(adapter.Adapter, Generic[T]):
@_check_resource(strict=False)
def _get(
self,
resource_type: Type[T],
value=None,
requires_id=True,
base_path=None,
skip_cache=False,
**attrs
self,
resource_type: Type[T],
value=None,
requires_id=True,
base_path=None,
skip_cache=False,
**attrs
):
"""Fetch a resource
@ -657,12 +660,12 @@ class Proxy(adapter.Adapter, Generic[T]):
)
def _list(
self,
resource_type: Type[T],
paginated=True,
base_path=None,
jmespath_filters=None,
**attrs
self,
resource_type: Type[T],
paginated=True,
base_path=None,
jmespath_filters=None,
**attrs
) -> Generator[T, None, None]:
"""List a resource
@ -690,8 +693,7 @@ class Proxy(adapter.Adapter, Generic[T]):
the ``resource_type``.
"""
data = resource_type.list(
self, paginated=paginated, base_path=base_path,
**attrs
self, paginated=paginated, base_path=base_path, **attrs
)
if jmespath_filters and isinstance(jmespath_filters, str):
@ -699,8 +701,9 @@ class Proxy(adapter.Adapter, Generic[T]):
return data
def _head(self, resource_type: Type[T], value=None, base_path=None,
**attrs):
def _head(
self, resource_type: Type[T], value=None, base_path=None, **attrs
):
"""Retrieve a resource's header
:param resource_type: The type of resource to retrieve.


@ -207,15 +207,14 @@ class _BaseComponent:
def warn_if_deprecated_property(self, value):
deprecated = object.__getattribute__(self, 'deprecated')
deprecation_reason = object.__getattribute__(
self, 'deprecation_reason',
self,
'deprecation_reason',
)
if value and deprecated:
warnings.warn(
"The field %r has been deprecated. %s" % (
self.name,
deprecation_reason or "Avoid usage."
),
"The field %r has been deprecated. %s"
% (self.name, deprecation_reason or "Avoid usage."),
os_warnings.RemovedFieldWarning,
)
return value
@ -1027,9 +1026,7 @@ class Resource(dict):
converted = []
for raw in value:
if isinstance(raw, Resource):
converted.append(
raw.to_dict(_to_munch=to_munch)
)
converted.append(raw.to_dict(_to_munch=to_munch))
elif isinstance(raw, dict) and to_munch:
converted.append(utils.Munch(raw))
else:
@ -1223,10 +1220,7 @@ class Resource(dict):
requires_id = self.requires_id
# Conditionally construct arguments for _prepare_request_body
request_kwargs = {
"patch": patch,
"prepend_key": prepend_key
}
request_kwargs = {"patch": patch, "prepend_key": prepend_key}
if resource_request_key is not None:
request_kwargs['resource_request_key'] = resource_request_key
body = self._prepare_request_body(**request_kwargs)
@ -1443,7 +1437,7 @@ class Resource(dict):
resource_request_key=None,
resource_response_key=None,
microversion=None,
**params
**params,
):
"""Create a remote resource based on this instance.
@ -1532,8 +1526,7 @@ class Resource(dict):
# fetch the body if it's required but not returned by create
fetch_kwargs = {}
if resource_response_key is not None:
fetch_kwargs = \
{'resource_response_key': resource_response_key}
fetch_kwargs = {'resource_response_key': resource_response_key}
return self.fetch(session, **fetch_kwargs)
return self
@ -1681,7 +1674,8 @@ class Resource(dict):
raise exceptions.MethodNotSupported(self, 'fetch')
request = self._prepare_request(
requires_id=requires_id, base_path=base_path,
requires_id=requires_id,
base_path=base_path,
)
session = self._get_session(session)
if microversion is None:
@ -1931,8 +1925,9 @@ class Resource(dict):
retry_on_conflict=retry_on_conflict,
)
def delete(self, session, error_message=None, *, microversion=None,
**kwargs):
def delete(
self, session, error_message=None, *, microversion=None, **kwargs
):
"""Delete the remote resource based on this instance.
:param session: The session to use for making this request.
@ -1948,8 +1943,9 @@ class Resource(dict):
the resource was not found.
"""
response = self._raw_delete(session, microversion=microversion,
**kwargs)
response = self._raw_delete(
session, microversion=microversion, **kwargs
)
kwargs = {}
if error_message:
kwargs['error_message'] = error_message
@ -2116,7 +2112,8 @@ class Resource(dict):
for key in client_filters.keys():
if isinstance(client_filters[key], dict):
if not _dict_filter(
client_filters[key], value.get(key, None)):
client_filters[key], value.get(key, None)
):
filters_matched = False
break
elif value.get(key, None) != client_filters[key]:
@ -2176,7 +2173,7 @@ class Resource(dict):
# Glance has a next field in the main body
next_link = next_link or data.get('next')
if next_link and next_link.startswith('/v'):
next_link = next_link[next_link.find('/', 1):]
next_link = next_link[next_link.find('/', 1) :]
if not next_link and 'next' in response.links:
# RFC5988 specifies Link headers and requests parses them if they
@ -2281,8 +2278,11 @@ class Resource(dict):
**params,
)
return match.fetch(session, microversion=microversion, **params)
except (exceptions.NotFoundException, exceptions.BadRequestException,
exceptions.ForbiddenException):
except (
exceptions.NotFoundException,
exceptions.BadRequestException,
exceptions.ForbiddenException,
):
# NOTE(gtema): There are a few places around openstack that return
# 400 if we try to GET resource and it doesn't exist.
pass
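Earlier in this file the pagination handler strips a leading API-version segment from 'next' links before following them. A tiny sketch of that slice on a hypothetical link:

next_link = '/v2/images?marker=abc123'
if next_link.startswith('/v'):
    next_link = next_link[next_link.find('/', 1):]
# -> '/images?marker=abc123'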


@ -36,7 +36,9 @@ class _ServiceDisabledProxyShim:
raise exceptions.ServiceDisabledException(
"Service '{service_type}' is disabled because its configuration "
"could not be loaded. {reason}".format(
service_type=self.service_type, reason=self.reason or ''))
service_type=self.service_type, reason=self.reason or ''
)
)
class ServiceDescription:
@ -73,9 +75,8 @@ class ServiceDescription:
"""
self.service_type = service_type or self.service_type
self.supported_versions = (
supported_versions
or self.supported_versions
or {})
supported_versions or self.supported_versions or {}
)
self.aliases = aliases or self.aliases
self.all_types = [service_type] + self.aliases
@ -135,7 +136,9 @@ class ServiceDescription:
"Failed to create a working proxy for service {service_type}: "
"{message}".format(
service_type=self.service_type,
message=exc or "No valid endpoint was discoverable."))
message=exc or "No valid endpoint was discoverable.",
)
)
def _make_proxy(self, instance):
"""Create a Proxy for the service in question.
@ -148,7 +151,8 @@ class ServiceDescription:
if not config.has_service(self.service_type):
return _ServiceDisabledProxyShim(
self.service_type,
config.get_disabled_reason(self.service_type))
config.get_disabled_reason(self.service_type),
)
# We don't know anything about this service, so the user is
# explicitly just using us for a passthrough REST adapter.
@ -186,13 +190,12 @@ class ServiceDescription:
" {service_type} is not known or supported by"
" openstacksdk. The resulting Proxy object will only"
" have direct passthrough REST capabilities.".format(
version=version_string,
service_type=self.service_type),
category=exceptions.UnsupportedServiceVersion)
version=version_string, service_type=self.service_type
),
category=exceptions.UnsupportedServiceVersion,
)
elif endpoint_override:
temp_adapter = config.get_session_client(
self.service_type
)
temp_adapter = config.get_session_client(self.service_type)
api_version = temp_adapter.get_endpoint_data().api_version
proxy_class = self.supported_versions.get(str(api_version[0]))
if proxy_class:
@ -207,9 +210,10 @@ class ServiceDescription:
" is not supported by openstacksdk. The resulting Proxy"
" object will only have direct passthrough REST"
" capabilities.".format(
version=api_version,
service_type=self.service_type),
category=exceptions.UnsupportedServiceVersion)
version=api_version, service_type=self.service_type
),
category=exceptions.UnsupportedServiceVersion,
)
if proxy_obj:
@ -225,7 +229,9 @@ class ServiceDescription:
raise exceptions.ServiceDiscoveryException(
"Failed to create a working proxy for service "
"{service_type}: No endpoint data found.".format(
service_type=self.service_type))
service_type=self.service_type
)
)
# If we've gotten here with a proxy object it means we have
# an endpoint_override in place. If the catalog_url and
@ -235,7 +241,8 @@ class ServiceDescription:
# so that subsequent discovery calls don't get made incorrectly.
if data.catalog_url != data.service_url:
ep_key = '{service_type}_endpoint_override'.format(
service_type=self.service_type.replace('-', '_'))
service_type=self.service_type.replace('-', '_')
)
config.config[ep_key] = data.service_url
proxy_obj = config.get_session_client(
self.service_type,
@ -248,16 +255,16 @@ class ServiceDescription:
if version_string:
version_kwargs['version'] = version_string
else:
supported_versions = sorted([
int(f) for f in self.supported_versions])
supported_versions = sorted(
[int(f) for f in self.supported_versions]
)
version_kwargs['min_version'] = str(supported_versions[0])
version_kwargs['max_version'] = '{version}.latest'.format(
version=str(supported_versions[-1]))
version=str(supported_versions[-1])
)
temp_adapter = config.get_session_client(
self.service_type,
allow_version_hack=True,
**version_kwargs
self.service_type, allow_version_hack=True, **version_kwargs
)
found_version = temp_adapter.get_api_major_version()
if found_version is None:
@ -268,14 +275,18 @@ class ServiceDescription:
" exists but does not have any supported versions.".format(
service_type=self.service_type,
cloud=instance.name,
region_name=region_name))
region_name=region_name,
)
)
else:
raise exceptions.NotSupported(
"The {service_type} service for {cloud}:{region_name}"
" exists but no version was discoverable.".format(
service_type=self.service_type,
cloud=instance.name,
region_name=region_name))
region_name=region_name,
)
)
proxy_class = self.supported_versions.get(str(found_version[0]))
if proxy_class:
return config.get_session_client(
@ -294,8 +305,10 @@ class ServiceDescription:
"Service {service_type} has no discoverable version."
" The resulting Proxy object will only have direct"
" passthrough REST capabilities.".format(
service_type=self.service_type),
category=exceptions.UnsupportedServiceVersion)
service_type=self.service_type
),
category=exceptions.UnsupportedServiceVersion,
)
return temp_adapter
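The version negotiation above derives a discovery window from the keys of supported_versions. A hedged sketch with a hypothetical mapping:

supported_versions = {'2': object, '3': object}  # hypothetical proxy classes
versions = sorted(int(v) for v in supported_versions)
min_version = str(versions[0])                  # '2'
max_version = '{}.latest'.format(versions[-1])  # '3.latest'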
def __set__(self, instance, value):


@ -47,7 +47,9 @@ class TestCase(base.BaseTestCase):
test_timeout = int(test_timeout * self.TIMEOUT_SCALING_FACTOR)
self.useFixture(
fixtures.EnvironmentVariable(
'OS_TEST_TIMEOUT', str(test_timeout)))
'OS_TEST_TIMEOUT', str(test_timeout)
)
)
except ValueError:
# Let oslotest do its thing
pass
@ -90,7 +92,8 @@ class TestCase(base.BaseTestCase):
if isinstance(second, utils.Munch):
second = second.toDict()
return super(TestCase, self).assertEqual(
first, second, *args, **kwargs)
first, second, *args, **kwargs
)
def printLogs(self, *args):
self._log_stream.seek(0)
@ -104,16 +107,18 @@ class TestCase(base.BaseTestCase):
if not x:
break
yield x.encode('utf8')
content = testtools.content.content_from_reader(
reader,
testtools.content_type.UTF8_TEXT,
False)
reader, testtools.content_type.UTF8_TEXT, False
)
self.addDetail('logging', content)
def add_info_on_exception(self, name, text):
def add_content(unused):
self.addDetail(name, testtools.content.text_content(
pprint.pformat(text)))
self.addDetail(
name, testtools.content.text_content(pprint.pformat(text))
)
self.addOnException(add_content)
def assertSubdict(self, part, whole):
@ -124,11 +129,18 @@ class TestCase(base.BaseTestCase):
if not whole[key] and part[key]:
missing_keys.append(key)
if missing_keys:
self.fail("Keys %s are in %s but not in %s" %
(missing_keys, part, whole))
wrong_values = [(key, part[key], whole[key])
for key in part if part[key] != whole[key]]
self.fail(
"Keys %s are in %s but not in %s" % (missing_keys, part, whole)
)
wrong_values = [
(key, part[key], whole[key])
for key in part
if part[key] != whole[key]
]
if wrong_values:
self.fail("Mismatched values: %s" %
", ".join("for %s got %s and %s" % tpl
for tpl in wrong_values))
self.fail(
"Mismatched values: %s"
% ", ".join(
"for %s got %s and %s" % tpl for tpl in wrong_values
)
)


@ -32,7 +32,8 @@ CHOCOLATE_FLAVOR_ID = u'0c1d9008-f546-4608-9e8f-f8bdaec8ddde'
STRAWBERRY_FLAVOR_ID = u'0c1d9008-f546-4608-9e8f-f8bdaec8dddf'
COMPUTE_ENDPOINT = 'https://compute.example.com/v2.1'
ORCHESTRATION_ENDPOINT = 'https://orchestration.example.com/v1/{p}'.format(
p=PROJECT_ID)
p=PROJECT_ID
)
NO_MD5 = '93b885adfe0da089cdf634904fd59f71'
NO_SHA256 = '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d'
FAKE_PUBLIC_KEY = (
@ -41,7 +42,8 @@ FAKE_PUBLIC_KEY = (
"sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qg"
"fQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3P"
"HB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+"
"YIsBUHNLLMM/oQp Generated-by-Nova\n")
"YIsBUHNLLMM/oQp Generated-by-Nova\n"
)
def make_fake_flavor(flavor_id, name, ram=100, disk=1600, vcpus=24):
@ -50,29 +52,36 @@ def make_fake_flavor(flavor_id, name, ram=100, disk=1600, vcpus=24):
u'OS-FLV-EXT-DATA:ephemeral': 0,
u'disk': disk,
u'id': flavor_id,
u'links': [{
u'href': u'{endpoint}/flavors/{id}'.format(
endpoint=COMPUTE_ENDPOINT, id=flavor_id),
u'rel': u'self'
}, {
u'href': u'{endpoint}/flavors/{id}'.format(
endpoint=COMPUTE_ENDPOINT, id=flavor_id),
u'rel': u'bookmark'
}],
u'links': [
{
u'href': u'{endpoint}/flavors/{id}'.format(
endpoint=COMPUTE_ENDPOINT, id=flavor_id
),
u'rel': u'self',
},
{
u'href': u'{endpoint}/flavors/{id}'.format(
endpoint=COMPUTE_ENDPOINT, id=flavor_id
),
u'rel': u'bookmark',
},
],
u'name': name,
u'os-flavor-access:is_public': True,
u'ram': ram,
u'rxtx_factor': 1.0,
u'swap': u'',
u'vcpus': vcpus
u'vcpus': vcpus,
}
FAKE_FLAVOR = make_fake_flavor(FLAVOR_ID, 'vanilla')
FAKE_CHOCOLATE_FLAVOR = make_fake_flavor(
CHOCOLATE_FLAVOR_ID, 'chocolate', ram=200)
CHOCOLATE_FLAVOR_ID, 'chocolate', ram=200
)
FAKE_STRAWBERRY_FLAVOR = make_fake_flavor(
STRAWBERRY_FLAVOR_ID, 'strawberry', ram=300)
STRAWBERRY_FLAVOR_ID, 'strawberry', ram=300
)
FAKE_FLAVOR_LIST = [FAKE_FLAVOR, FAKE_CHOCOLATE_FLAVOR, FAKE_STRAWBERRY_FLAVOR]
FAKE_TEMPLATE = '''heat_template_version: 2014-10-16
@ -95,8 +104,14 @@ FAKE_TEMPLATE_CONTENT = template_format.parse(FAKE_TEMPLATE)
def make_fake_server(
server_id, name, status='ACTIVE', admin_pass=None,
addresses=None, image=None, flavor=None):
server_id,
name,
status='ACTIVE',
admin_pass=None,
addresses=None,
image=None,
flavor=None,
):
if addresses is None:
if status == 'ACTIVE':
addresses = {
@ -105,25 +120,28 @@ def make_fake_server(
"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d",
"version": 6,
"addr": "fddb:b018:307:0:f816:3eff:fedf:b08d",
"OS-EXT-IPS:type": "fixed"},
"OS-EXT-IPS:type": "fixed",
},
{
"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d",
"version": 4,
"addr": "10.1.0.9",
"OS-EXT-IPS:type": "fixed"},
"OS-EXT-IPS:type": "fixed",
},
{
"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d",
"version": 4,
"addr": "172.24.5.5",
"OS-EXT-IPS:type": "floating"}]}
"OS-EXT-IPS:type": "floating",
},
]
}
else:
addresses = {}
if image is None:
image = {"id": "217f3ab1-03e0-4450-bf27-63d52b421e9e",
"links": []}
image = {"id": "217f3ab1-03e0-4450-bf27-63d52b421e9e", "links": []}
if flavor is None:
flavor = {"id": "64",
"links": []}
flavor = {"id": "64", "links": []}
server = {
"OS-EXT-STS:task_state": None,
@ -152,7 +170,8 @@ def make_fake_server(
"created": "2017-03-23T23:57:12Z",
"tenant_id": PROJECT_ID,
"os-extended-volumes:volumes_attached": [],
"config_drive": "True"}
"config_drive": "True",
}
if admin_pass:
server['adminPass'] = admin_pass
return json.loads(json.dumps(server))
@ -188,7 +207,8 @@ def make_fake_stack(id, name, description=None, status='CREATE_COMPLETE'):
def make_fake_stack_event(
id, name, status='CREATE_COMPLETED', resource_name='id'):
id, name, status='CREATE_COMPLETED', resource_name='id'
):
event_id = uuid.uuid4().hex
self_url = "{endpoint}/stacks/{name}/{id}/resources/{name}/events/{event}"
resource_url = "{endpoint}/stacks/{name}/{id}/resources/{name}"
@ -199,19 +219,25 @@ def make_fake_stack_event(
{
"href": self_url.format(
endpoint=ORCHESTRATION_ENDPOINT,
name=name, id=id, event=event_id),
"rel": "self"
}, {
name=name,
id=id,
event=event_id,
),
"rel": "self",
},
{
"href": resource_url.format(
endpoint=ORCHESTRATION_ENDPOINT,
name=name, id=id),
"rel": "resource"
}, {
endpoint=ORCHESTRATION_ENDPOINT, name=name, id=id
),
"rel": "resource",
},
{
"href": "{endpoint}/stacks/{name}/{id}".format(
endpoint=ORCHESTRATION_ENDPOINT,
name=name, id=id),
"rel": "stack"
}],
endpoint=ORCHESTRATION_ENDPOINT, name=name, id=id
),
"rel": "stack",
},
],
"logical_resource_id": name,
"resource_status": status,
"resource_status_reason": "",
@ -221,10 +247,14 @@ def make_fake_stack_event(
def make_fake_image(
image_id=None, md5=NO_MD5, sha256=NO_SHA256, status='active',
image_name=u'fake_image',
data=None,
checksum=u'ee36e35a297980dee1b514de9803ec6d'):
image_id=None,
md5=NO_MD5,
sha256=NO_SHA256,
status='active',
image_name=u'fake_image',
data=None,
checksum=u'ee36e35a297980dee1b514de9803ec6d',
):
if data:
md5 = utils.md5(usedforsecurity=False)
sha256 = hashlib.sha256()
@ -249,9 +279,9 @@ def make_fake_image(
u'status': status,
u'tags': [],
u'visibility': u'private',
u'locations': [{
u'url': u'http://127.0.0.1/images/' + image_id,
u'metadata': {}}],
u'locations': [
{u'url': u'http://127.0.0.1/images/' + image_id, u'metadata': {}}
],
u'min_disk': 40,
u'virtual_size': None,
u'name': image_name,
@ -260,16 +290,16 @@ def make_fake_image(
u'owner_specified.openstack.md5': md5 or NO_MD5,
u'owner_specified.openstack.sha256': sha256 or NO_SHA256,
u'owner_specified.openstack.object': 'images/{name}'.format(
name=image_name),
u'protected': False}
name=image_name
),
u'protected': False,
}
def make_fake_machine(machine_name, machine_id=None):
if not machine_id:
machine_id = uuid.uuid4().hex
return meta.obj_to_munch(FakeMachine(
id=machine_id,
name=machine_name))
return meta.obj_to_munch(FakeMachine(id=machine_id, name=machine_name))
def make_fake_port(address, node_id=None, port_id=None):
@ -277,10 +307,9 @@ def make_fake_port(address, node_id=None, port_id=None):
node_id = uuid.uuid4().hex
if not port_id:
port_id = uuid.uuid4().hex
return meta.obj_to_munch(FakeMachinePort(
id=port_id,
address=address,
node_id=node_id))
return meta.obj_to_munch(
FakeMachinePort(id=port_id, address=address, node_id=node_id)
)
class FakeFloatingIP:
@ -293,63 +322,58 @@ class FakeFloatingIP:
def make_fake_server_group(id, name, policies):
return json.loads(json.dumps({
'id': id,
'name': name,
'policies': policies,
'members': [],
'metadata': {},
}))
return json.loads(
json.dumps(
{
'id': id,
'name': name,
'policies': policies,
'members': [],
'metadata': {},
}
)
)
def make_fake_hypervisor(id, name):
return json.loads(json.dumps({
'id': id,
'hypervisor_hostname': name,
'state': 'up',
'status': 'enabled',
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": [
"pge",
"clflush"
],
"topology": {
"cores": 1,
"threads": 1,
"sockets": 4
return json.loads(
json.dumps(
{
'id': id,
'hypervisor_hostname': name,
'state': 'up',
'status': 'enabled',
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": ["pge", "clflush"],
"topology": {"cores": 1, "threads": 1, "sockets": 4},
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {"host": "host1", "id": 7, "disabled_reason": None},
"vcpus": 1,
"vcpus_used": 0,
}
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "host1",
"id": 7,
"disabled_reason": None
},
"vcpus": 1,
"vcpus_used": 0
}))
)
)
class FakeVolume:
def __init__(
self, id, status, name, attachments=[],
size=75):
def __init__(self, id, status, name, attachments=[], size=75):
self.id = id
self.status = status
self.name = name
@ -366,8 +390,7 @@ class FakeVolume:
class FakeVolumeSnapshot:
def __init__(
self, id, status, name, description, size=75):
def __init__(self, id, status, name, description, size=75):
self.id = id
self.status = status
self.name = name
@ -380,10 +403,20 @@ class FakeVolumeSnapshot:
class FakeMachine:
def __init__(self, id, name=None, driver=None, driver_info=None,
chassis_uuid=None, instance_info=None, instance_uuid=None,
properties=None, reservation=None, last_error=None,
provision_state='available'):
def __init__(
self,
id,
name=None,
driver=None,
driver_info=None,
chassis_uuid=None,
instance_info=None,
instance_uuid=None,
properties=None,
reservation=None,
last_error=None,
provision_state='available',
):
self.uuid = id
self.name = name
self.driver = driver
@ -405,50 +438,69 @@ class FakeMachinePort:
def make_fake_neutron_security_group(
id, name, description, rules, stateful=True, project_id=None):
id, name, description, rules, stateful=True, project_id=None
):
if not rules:
rules = []
if not project_id:
project_id = PROJECT_ID
return json.loads(json.dumps({
'id': id,
'name': name,
'description': description,
'stateful': stateful,
'project_id': project_id,
'tenant_id': project_id,
'security_group_rules': rules,
}))
return json.loads(
json.dumps(
{
'id': id,
'name': name,
'description': description,
'stateful': stateful,
'project_id': project_id,
'tenant_id': project_id,
'security_group_rules': rules,
}
)
)
def make_fake_nova_security_group_rule(
id, from_port, to_port, ip_protocol, cidr):
return json.loads(json.dumps({
'id': id,
'from_port': int(from_port),
'to_port': int(to_port),
'ip_protocol': ip_protocol,
'ip_range': {
'cidr': cidr
}
}))
id, from_port, to_port, ip_protocol, cidr
):
return json.loads(
json.dumps(
{
'id': id,
'from_port': int(from_port),
'to_port': int(to_port),
'ip_protocol': ip_protocol,
'ip_range': {'cidr': cidr},
}
)
)
def make_fake_nova_security_group(id, name, description, rules):
if not rules:
rules = []
return json.loads(json.dumps({
'id': id,
'name': name,
'description': description,
'tenant_id': PROJECT_ID,
'rules': rules,
}))
return json.loads(
json.dumps(
{
'id': id,
'name': name,
'description': description,
'tenant_id': PROJECT_ID,
'rules': rules,
}
)
)
class FakeNovaSecgroupRule:
def __init__(self, id, from_port=None, to_port=None, ip_protocol=None,
cidr=None, parent_group_id=None):
def __init__(
self,
id,
from_port=None,
to_port=None,
ip_protocol=None,
cidr=None,
parent_group_id=None,
):
self.id = id
self.from_port = from_port
self.to_port = to_port
@ -465,8 +517,7 @@ class FakeHypervisor:
class FakeZone:
def __init__(self, id, name, type_, email, description,
ttl, masters):
def __init__(self, id, name, type_, email, description, ttl, masters):
self.id = id
self.name = name
self.type_ = type_
@ -477,8 +528,7 @@ class FakeZone:
class FakeRecordset:
def __init__(self, zone, id, name, type_, description,
ttl, records):
def __init__(self, zone, id, name, type_, description, ttl, records):
self.zone = zone
self.id = id
self.name = name
@ -488,22 +538,27 @@ class FakeRecordset:
self.records = records
def make_fake_aggregate(id, name, availability_zone='nova',
metadata=None, hosts=None):
def make_fake_aggregate(
id, name, availability_zone='nova', metadata=None, hosts=None
):
if not metadata:
metadata = {}
if not hosts:
hosts = []
return json.loads(json.dumps({
"availability_zone": availability_zone,
"created_at": datetime.datetime.now().isoformat(),
"deleted": False,
"deleted_at": None,
"hosts": hosts,
"id": int(id),
"metadata": {
"availability_zone": availability_zone,
},
"name": name,
"updated_at": None,
}))
return json.loads(
json.dumps(
{
"availability_zone": availability_zone,
"created_at": datetime.datetime.now().isoformat(),
"deleted": False,
"deleted_at": None,
"hosts": hosts,
"id": int(id),
"metadata": {
"availability_zone": availability_zone,
},
"name": name,
"updated_at": None,
}
)
)


@ -51,10 +51,12 @@ class BaseFunctionalTest(base.TestCase):
self._demo_name = os.environ.get('OPENSTACKSDK_DEMO_CLOUD', 'devstack')
self._demo_name_alt = os.environ.get(
'OPENSTACKSDK_DEMO_CLOUD_ALT', 'devstack-alt',
'OPENSTACKSDK_DEMO_CLOUD_ALT',
'devstack-alt',
)
self._op_name = os.environ.get(
'OPENSTACKSDK_OPERATOR_CLOUD', 'devstack-admin',
'OPENSTACKSDK_OPERATOR_CLOUD',
'devstack-admin',
)
self.config = openstack.config.OpenStackConfig()
@ -64,8 +66,9 @@ class BaseFunctionalTest(base.TestCase):
else:
self.operator_cloud = None
self.identity_version = \
self.user_cloud.config.get_api_version('identity')
self.identity_version = self.user_cloud.config.get_api_version(
'identity'
)
self.flavor = self._pick_flavor()
self.image = self._pick_image()
@ -73,8 +76,11 @@ class BaseFunctionalTest(base.TestCase):
# Defines default timeout for wait_for methods used
# in the functional tests
self._wait_for_timeout = int(
os.getenv(self._wait_for_timeout_key, os.getenv(
'OPENSTACKSDK_FUNC_TEST_TIMEOUT', 300)))
os.getenv(
self._wait_for_timeout_key,
os.getenv('OPENSTACKSDK_FUNC_TEST_TIMEOUT', 300),
)
)
def _set_user_cloud(self, **kwargs):
user_config = self.config.get_one(cloud=self._demo_name, **kwargs)
@ -85,7 +91,8 @@ class BaseFunctionalTest(base.TestCase):
# it
if self._demo_name_alt:
user_config_alt = self.config.get_one(
cloud=self._demo_name_alt, **kwargs)
cloud=self._demo_name_alt, **kwargs
)
self.user_cloud_alt = connection.Connection(config=user_config_alt)
_disable_keep_alive(self.user_cloud_alt)
else:
@ -119,7 +126,8 @@ class BaseFunctionalTest(base.TestCase):
return flavor
raise self.failureException(
"Cloud does not have flavor '%s'", flavor_name,
"Cloud does not have flavor '%s'",
flavor_name,
)
# Enable running functional tests against RAX, which requires
@ -159,7 +167,8 @@ class BaseFunctionalTest(base.TestCase):
return image
raise self.failureException(
"Cloud does not have image '%s'", image_name,
"Cloud does not have image '%s'",
image_name,
)
for image in images:
@ -186,6 +195,7 @@ class BaseFunctionalTest(base.TestCase):
def cleanup():
result = func(*args, **kwargs)
self.assertIsNone(result)
self.addCleanup(cleanup)
def require_service(self, service_type, min_microversion=None, **kwargs):
@ -201,14 +211,18 @@ class BaseFunctionalTest(base.TestCase):
:returns: True if the service exists, otherwise False.
"""
if not self.conn.has_service(service_type):
self.skipTest('Service {service_type} not found in cloud'.format(
service_type=service_type))
self.skipTest(
'Service {service_type} not found in cloud'.format(
service_type=service_type
)
)
if not min_microversion:
return
data = self.conn.session.get_endpoint_data(
service_type=service_type, **kwargs)
service_type=service_type, **kwargs
)
if not (
data.min_microversion
@ -230,12 +244,11 @@ class BaseFunctionalTest(base.TestCase):
# unix_t is also used to easier determine orphans when running real
# functional tests on a real cloud
return (prefix if prefix else '') + "{time}-{uuid}".format(
time=int(time.time()),
uuid=uuid.uuid4().hex)
time=int(time.time()), uuid=uuid.uuid4().hex
)
class KeystoneBaseFunctionalTest(BaseFunctionalTest):
def setUp(self):
super(KeystoneBaseFunctionalTest, self).setUp()


@ -37,44 +37,49 @@ from openstack.tests import fakes
_ProjectData = collections.namedtuple(
'ProjectData',
'project_id, project_name, enabled, domain_id, description, '
'parent_id, json_response, json_request')
'parent_id, json_response, json_request',
)
_UserData = collections.namedtuple(
'UserData',
'user_id, password, name, email, description, domain_id, enabled, '
'json_response, json_request')
'json_response, json_request',
)
_GroupData = collections.namedtuple(
'GroupData',
'group_id, group_name, domain_id, description, json_response, '
'json_request')
'json_request',
)
_DomainData = collections.namedtuple(
'DomainData',
'domain_id, domain_name, description, json_response, '
'json_request')
'domain_id, domain_name, description, json_response, ' 'json_request',
)
_ServiceData = collections.namedtuple(
'Servicedata',
'service_id, service_name, service_type, description, enabled, '
'json_response_v3, json_response_v2, json_request')
'json_response_v3, json_response_v2, json_request',
)
_EndpointDataV3 = collections.namedtuple(
'EndpointData',
'endpoint_id, service_id, interface, region_id, url, enabled, '
'json_response, json_request')
'json_response, json_request',
)
# NOTE(notmorgan): Shade does not support domain-specific roles
# This should eventually be fixed if it becomes a main-stream feature.
_RoleData = collections.namedtuple(
'RoleData',
'role_id, role_name, json_response, json_request')
'RoleData', 'role_id, role_name, json_response, json_request'
)
class TestCase(base.TestCase):
@ -92,17 +97,20 @@ class TestCase(base.TestCase):
def _nosleep(seconds):
return realsleep(seconds * 0.0001)
self.sleep_fixture = self.useFixture(fixtures.MonkeyPatch(
'time.sleep',
_nosleep))
self.sleep_fixture = self.useFixture(
fixtures.MonkeyPatch('time.sleep', _nosleep)
)
self.fixtures_directory = 'openstack/tests/unit/fixtures'
self.os_fixture = self.useFixture(
os_fixture.ConnectionFixture(project_id=fakes.PROJECT_ID))
os_fixture.ConnectionFixture(project_id=fakes.PROJECT_ID)
)
# Isolate openstack.config from test environment
config = tempfile.NamedTemporaryFile(delete=False)
cloud_path = '%s/clouds/%s' % (self.fixtures_directory,
cloud_config_fixture)
cloud_path = '%s/clouds/%s' % (
self.fixtures_directory,
cloud_config_fixture,
)
with open(cloud_path, 'rb') as f:
content = f.read()
config.write(content)
@ -115,7 +123,8 @@ class TestCase(base.TestCase):
self.config = occ.OpenStackConfig(
config_files=[config.name],
vendor_files=[vendor.name],
secure_files=['non-existant'])
secure_files=['non-existant'],
)
self.oslo_config_dict = {
# All defaults for nova
@ -126,7 +135,7 @@ class TestCase(base.TestCase):
'heat': {
'region_name': 'SpecialRegion',
'interface': 'internal',
'endpoint_override': 'https://example.org:8888/heat/v2'
'endpoint_override': 'https://example.org:8888/heat/v2',
},
# test a service with dashes
'ironic_inspector': {
@ -151,7 +160,8 @@ class TestCase(base.TestCase):
# request in the correct order.
self._uri_registry = collections.OrderedDict()
self.discovery_json = os.path.join(
self.fixtures_directory, 'discovery.json')
self.fixtures_directory, 'discovery.json'
)
self.use_keystone_v3()
self.__register_uris_called = False
@ -166,11 +176,18 @@ class TestCase(base.TestCase):
return conf
# TODO(shade) Update this to handle service type aliases
def get_mock_url(self, service_type, interface='public', resource=None,
append=None, base_url_append=None,
qs_elements=None):
def get_mock_url(
self,
service_type,
interface='public',
resource=None,
append=None,
base_url_append=None,
qs_elements=None,
):
endpoint_url = self.cloud.endpoint_for(
service_type=service_type, interface=interface)
service_type=service_type, interface=interface
)
# Strip trailing slashes, so as not to produce double-slashes below
if endpoint_url.endswith('/'):
endpoint_url = endpoint_url[:-1]
@ -184,13 +201,17 @@ class TestCase(base.TestCase):
to_join.extend([urllib.parse.quote(i) for i in append])
if qs_elements is not None:
qs = '?%s' % '&'.join(qs_elements)
return '%(uri)s%(qs)s' % {
'uri': '/'.join(to_join),
'qs': qs}
return '%(uri)s%(qs)s' % {'uri': '/'.join(to_join), 'qs': qs}
def mock_for_keystone_projects(self, project=None, v3=True,
list_get=False, id_get=False,
project_list=None, project_count=None):
def mock_for_keystone_projects(
self,
project=None,
v3=True,
list_get=False,
id_get=False,
project_list=None,
project_count=None,
):
if project:
assert not (project_list or project_count)
elif project_list:
@ -198,8 +219,9 @@ class TestCase(base.TestCase):
elif project_count:
assert not (project or project_list)
else:
raise Exception('Must specify a project, project_list, '
'or project_count')
raise Exception(
'Must specify a project, project_list, ' 'or project_count'
)
assert list_get or id_get
base_url_append = 'v3' if v3 else None
@ -207,40 +229,57 @@ class TestCase(base.TestCase):
project_list = [project]
elif project_count:
# Generate multiple projects
project_list = [self._get_project_data(v3=v3)
for c in range(0, project_count)]
project_list = [
self._get_project_data(v3=v3) for c in range(0, project_count)
]
uri_mock_list = []
if list_get:
uri_mock_list.append(
dict(method='GET',
uri=self.get_mock_url(
service_type='identity',
interface='admin',
resource='projects',
base_url_append=base_url_append),
status_code=200,
json={'projects': [p.json_response['project']
for p in project_list]})
dict(
method='GET',
uri=self.get_mock_url(
service_type='identity',
interface='admin',
resource='projects',
base_url_append=base_url_append,
),
status_code=200,
json={
'projects': [
p.json_response['project'] for p in project_list
]
},
)
)
if id_get:
for p in project_list:
uri_mock_list.append(
dict(method='GET',
uri=self.get_mock_url(
service_type='identity',
interface='admin',
resource='projects',
append=[p.project_id],
base_url_append=base_url_append),
status_code=200,
json=p.json_response)
dict(
method='GET',
uri=self.get_mock_url(
service_type='identity',
interface='admin',
resource='projects',
append=[p.project_id],
base_url_append=base_url_append,
),
status_code=200,
json=p.json_response,
)
)
self.__do_register_uris(uri_mock_list)
return project_list
def _get_project_data(self, project_name=None, enabled=None,
domain_id=None, description=None, v3=True,
project_id=None, parent_id=None):
def _get_project_data(
self,
project_name=None,
enabled=None,
domain_id=None,
description=None,
v3=True,
project_id=None,
parent_id=None,
):
project_name = project_name or self.getUniqueString('projectName')
project_id = uuid.UUID(project_id or uuid.uuid4().hex).hex
if parent_id:
@ -264,9 +303,16 @@ class TestCase(base.TestCase):
response['description'] = description
request['description'] = description
request.setdefault('description', None)
return _ProjectData(project_id, project_name, enabled, domain_id,
description, parent_id,
{'project': response}, {'project': request})
return _ProjectData(
project_id,
project_name,
enabled,
domain_id,
description,
parent_id,
{'project': response},
{'project': request},
)
def _get_group_data(self, name=None, domain_id=None, description=None):
group_id = uuid.uuid4().hex
@ -278,8 +324,14 @@ class TestCase(base.TestCase):
response['description'] = description
request['description'] = description
return _GroupData(group_id, name, domain_id, description,
{'group': response}, {'group': request})
return _GroupData(
group_id,
name,
domain_id,
description,
{'group': response},
{'group': request},
)
def _get_user_data(self, name=None, password=None, **kwargs):
@ -305,16 +357,27 @@ class TestCase(base.TestCase):
if response['description']:
request['description'] = response['description']
self.assertIs(0, len(kwargs), message='extra key-word args received '
'on _get_user_data')
self.assertIs(
0,
len(kwargs),
message='extra key-word args received ' 'on _get_user_data',
)
return _UserData(user_id, password, name, response['email'],
response['description'], response.get('domain_id'),
response.get('enabled'), {'user': response},
{'user': request})
return _UserData(
user_id,
password,
name,
response['email'],
response['description'],
response.get('domain_id'),
response.get('enabled'),
{'user': response},
{'user': request},
)
def _get_domain_data(self, domain_name=None, description=None,
enabled=None):
def _get_domain_data(
self, domain_name=None, description=None, enabled=None
):
domain_id = uuid.uuid4().hex
domain_name = domain_name or self.getUniqueString('domainName')
response = {'id': domain_id, 'name': domain_name}
@ -326,41 +389,76 @@ class TestCase(base.TestCase):
response['description'] = description
request['description'] = description
response.setdefault('enabled', True)
return _DomainData(domain_id, domain_name, description,
{'domain': response}, {'domain': request})
return _DomainData(
domain_id,
domain_name,
description,
{'domain': response},
{'domain': request},
)
def _get_service_data(self, type=None, name=None, description=None,
enabled=True):
def _get_service_data(
self, type=None, name=None, description=None, enabled=True
):
service_id = uuid.uuid4().hex
name = name or uuid.uuid4().hex
type = type or uuid.uuid4().hex
response = {'id': service_id, 'name': name, 'type': type,
'enabled': enabled}
response = {
'id': service_id,
'name': name,
'type': type,
'enabled': enabled,
}
if description is not None:
response['description'] = description
request = response.copy()
request.pop('id')
return _ServiceData(service_id, name, type, description, enabled,
{'service': response},
{'OS-KSADM:service': response}, request)
return _ServiceData(
service_id,
name,
type,
description,
enabled,
{'service': response},
{'OS-KSADM:service': response},
request,
)
def _get_endpoint_v3_data(self, service_id=None, region=None,
url=None, interface=None, enabled=True):
def _get_endpoint_v3_data(
self,
service_id=None,
region=None,
url=None,
interface=None,
enabled=True,
):
endpoint_id = uuid.uuid4().hex
service_id = service_id or uuid.uuid4().hex
region = region or uuid.uuid4().hex
url = url or 'https://example.com/'
interface = interface or uuid.uuid4().hex
response = {'id': endpoint_id, 'service_id': service_id,
'region_id': region, 'interface': interface,
'url': url, 'enabled': enabled}
response = {
'id': endpoint_id,
'service_id': service_id,
'region_id': region,
'interface': interface,
'url': url,
'enabled': enabled,
}
request = response.copy()
request.pop('id')
return _EndpointDataV3(endpoint_id, service_id, interface, region,
url, enabled, {'endpoint': response},
{'endpoint': request})
return _EndpointDataV3(
endpoint_id,
service_id,
interface,
region,
url,
enabled,
{'endpoint': response},
{'endpoint': request},
)
def _get_role_data(self, role_name=None):
role_id = uuid.uuid4().hex
@ -368,20 +466,28 @@ class TestCase(base.TestCase):
request = {'name': role_name}
response = request.copy()
response['id'] = role_id
return _RoleData(role_id, role_name, {'role': response},
{'role': request})
return _RoleData(
role_id, role_name, {'role': response}, {'role': request}
)
def use_broken_keystone(self):
self.adapter = self.useFixture(rm_fixture.Fixture())
self.calls = []
self._uri_registry.clear()
self.__do_register_uris([
dict(method='GET', uri='https://identity.example.com/',
text=open(self.discovery_json, 'r').read()),
dict(method='POST',
uri='https://identity.example.com/v3/auth/tokens',
status_code=400),
])
self.__do_register_uris(
[
dict(
method='GET',
uri='https://identity.example.com/',
text=open(self.discovery_json, 'r').read(),
),
dict(
method='POST',
uri='https://identity.example.com/v3/auth/tokens',
status_code=400,
),
]
)
self._make_test_cloud(identity_api_version='3')
def use_nothing(self):
@ -389,40 +495,38 @@ class TestCase(base.TestCase):
self._uri_registry.clear()
def get_keystone_v3_token(
self,
project_name='admin',
self,
project_name='admin',
):
return dict(
method='POST',
uri='https://identity.example.com/v3/auth/tokens',
headers={
'X-Subject-Token': self.getUniqueString('KeystoneToken')
},
headers={'X-Subject-Token': self.getUniqueString('KeystoneToken')},
json=self.os_fixture.v3_token,
validate=dict(json={
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'domain': {
'name': 'default',
},
'name': 'admin',
'password': 'password'
}
}
},
'scope': {
'project': {
'domain': {
'name': 'default'
validate=dict(
json={
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'domain': {
'name': 'default',
},
'name': 'admin',
'password': 'password',
}
},
'name': project_name
}
},
'scope': {
'project': {
'domain': {'name': 'default'},
'name': project_name,
}
},
}
}
}),
),
)
def get_keystone_discovery(self):
@ -437,10 +541,12 @@ class TestCase(base.TestCase):
self.adapter = self.useFixture(rm_fixture.Fixture())
self.calls = []
self._uri_registry.clear()
self.__do_register_uris([
self.get_keystone_discovery(),
self.get_keystone_v3_token(),
])
self.__do_register_uris(
[
self.get_keystone_discovery(),
self.get_keystone_v3_token(),
]
)
self._make_test_cloud(identity_api_version='3')
def use_keystone_v2(self):
@ -448,119 +554,171 @@ class TestCase(base.TestCase):
self.calls = []
self._uri_registry.clear()
self.__do_register_uris([
self.get_keystone_discovery(),
dict(method='POST',
uri='https://identity.example.com/v2.0/tokens',
json=self.os_fixture.v2_token,
),
])
self.__do_register_uris(
[
self.get_keystone_discovery(),
dict(
method='POST',
uri='https://identity.example.com/v2.0/tokens',
json=self.os_fixture.v2_token,
),
]
)
self._make_test_cloud(cloud_name='_test_cloud_v2_',
identity_api_version='2.0')
self._make_test_cloud(
cloud_name='_test_cloud_v2_', identity_api_version='2.0'
)
def _make_test_cloud(self, cloud_name='_test_cloud_', **kwargs):
test_cloud = os.environ.get('OPENSTACKSDK_OS_CLOUD', cloud_name)
self.cloud_config = self.config.get_one(
cloud=test_cloud, validate=True, **kwargs)
cloud=test_cloud, validate=True, **kwargs
)
self.cloud = openstack.connection.Connection(
config=self.cloud_config, strict=self.strict_cloud)
config=self.cloud_config, strict=self.strict_cloud
)
def get_cinder_discovery_mock_dict(
self,
block_storage_version_json='block-storage-version.json',
block_storage_discovery_url='https://block-storage.example.com/'):
self,
block_storage_version_json='block-storage-version.json',
block_storage_discovery_url='https://block-storage.example.com/',
):
discovery_fixture = os.path.join(
self.fixtures_directory, block_storage_version_json)
return dict(method='GET', uri=block_storage_discovery_url,
text=open(discovery_fixture, 'r').read())
self.fixtures_directory, block_storage_version_json
)
return dict(
method='GET',
uri=block_storage_discovery_url,
text=open(discovery_fixture, 'r').read(),
)
def get_glance_discovery_mock_dict(
self,
image_version_json='image-version.json',
image_discovery_url='https://image.example.com/'):
self,
image_version_json='image-version.json',
image_discovery_url='https://image.example.com/',
):
discovery_fixture = os.path.join(
self.fixtures_directory, image_version_json)
return dict(method='GET', uri=image_discovery_url,
status_code=300,
text=open(discovery_fixture, 'r').read())
self.fixtures_directory, image_version_json
)
return dict(
method='GET',
uri=image_discovery_url,
status_code=300,
text=open(discovery_fixture, 'r').read(),
)
def get_nova_discovery_mock_dict(
self,
compute_version_json='compute-version.json',
compute_discovery_url='https://compute.example.com/v2.1/'):
self,
compute_version_json='compute-version.json',
compute_discovery_url='https://compute.example.com/v2.1/',
):
discovery_fixture = os.path.join(
self.fixtures_directory, compute_version_json)
self.fixtures_directory, compute_version_json
)
return dict(
method='GET',
uri=compute_discovery_url,
text=open(discovery_fixture, 'r').read())
text=open(discovery_fixture, 'r').read(),
)
def get_placement_discovery_mock_dict(
self, discovery_fixture='placement.json'):
self, discovery_fixture='placement.json'
):
discovery_fixture = os.path.join(
self.fixtures_directory, discovery_fixture)
return dict(method='GET', uri="https://placement.example.com/",
text=open(discovery_fixture, 'r').read())
self.fixtures_directory, discovery_fixture
)
return dict(
method='GET',
uri="https://placement.example.com/",
text=open(discovery_fixture, 'r').read(),
)
def get_designate_discovery_mock_dict(self):
discovery_fixture = os.path.join(
self.fixtures_directory, "dns.json")
return dict(method='GET', uri="https://dns.example.com/",
text=open(discovery_fixture, 'r').read())
discovery_fixture = os.path.join(self.fixtures_directory, "dns.json")
return dict(
method='GET',
uri="https://dns.example.com/",
text=open(discovery_fixture, 'r').read(),
)
def get_ironic_discovery_mock_dict(self):
discovery_fixture = os.path.join(
self.fixtures_directory, "baremetal.json")
return dict(method='GET', uri="https://baremetal.example.com/",
text=open(discovery_fixture, 'r').read())
self.fixtures_directory, "baremetal.json"
)
return dict(
method='GET',
uri="https://baremetal.example.com/",
text=open(discovery_fixture, 'r').read(),
)
def get_senlin_discovery_mock_dict(self):
discovery_fixture = os.path.join(
self.fixtures_directory, "clustering.json")
return dict(method='GET', uri="https://clustering.example.com/",
text=open(discovery_fixture, 'r').read())
self.fixtures_directory, "clustering.json"
)
return dict(
method='GET',
uri="https://clustering.example.com/",
text=open(discovery_fixture, 'r').read(),
)
def use_compute_discovery(
self, compute_version_json='compute-version.json',
compute_discovery_url='https://compute.example.com/v2.1/'):
self.__do_register_uris([
self.get_nova_discovery_mock_dict(
compute_version_json, compute_discovery_url),
])
self,
compute_version_json='compute-version.json',
compute_discovery_url='https://compute.example.com/v2.1/',
):
self.__do_register_uris(
[
self.get_nova_discovery_mock_dict(
compute_version_json, compute_discovery_url
),
]
)
def get_cyborg_discovery_mock_dict(self):
discovery_fixture = os.path.join(
self.fixtures_directory, "accelerator.json")
return dict(method='GET', uri="https://accelerator.example.com/",
text=open(discovery_fixture, 'r').read())
self.fixtures_directory, "accelerator.json"
)
return dict(
method='GET',
uri="https://accelerator.example.com/",
text=open(discovery_fixture, 'r').read(),
)
def get_manila_discovery_mock_dict(self):
discovery_fixture = os.path.join(
self.fixtures_directory, "shared-file-system.json")
return dict(method='GET',
uri="https://shared-file-system.example.com/",
text=open(discovery_fixture, 'r').read())
self.fixtures_directory, "shared-file-system.json"
)
return dict(
method='GET',
uri="https://shared-file-system.example.com/",
text=open(discovery_fixture, 'r').read(),
)
def use_glance(
self, image_version_json='image-version.json',
image_discovery_url='https://image.example.com/'):
self,
image_version_json='image-version.json',
image_discovery_url='https://image.example.com/',
):
# NOTE(notmorgan): This method is only meant to be used in "setUp"
# where the ordering of the url being registered is tightly controlled
# if the functionality of .use_glance is meant to be used during an
# actual test case, use .get_glance_discovery_mock and apply to the
# right location in the mock_uris when calling .register_uris
self.__do_register_uris([
self.get_glance_discovery_mock_dict(
image_version_json, image_discovery_url)])
self.__do_register_uris(
[
self.get_glance_discovery_mock_dict(
image_version_json, image_discovery_url
)
]
)
def use_cinder(self):
self.__do_register_uris([
self.get_cinder_discovery_mock_dict()])
self.__do_register_uris([self.get_cinder_discovery_mock_dict()])
def use_placement(self, **kwargs):
self.__do_register_uris([
self.get_placement_discovery_mock_dict(**kwargs)])
self.__do_register_uris(
[self.get_placement_discovery_mock_dict(**kwargs)]
)
def use_designate(self):
# NOTE(slaweq): This method is only meant to be used in "setUp"
@ -568,8 +726,7 @@ class TestCase(base.TestCase):
# if the functionality of .use_designate is meant to be used during an
# actual test case, use .get_designate_discovery_mock and apply to the
# right location in the mock_uris when calling .register_uris
self.__do_register_uris([
self.get_designate_discovery_mock_dict()])
self.__do_register_uris([self.get_designate_discovery_mock_dict()])
def use_ironic(self):
# NOTE(TheJulia): This method is only meant to be used in "setUp"
@ -577,8 +734,7 @@ class TestCase(base.TestCase):
# if the functionality of .use_ironic is meant to be used during an
# actual test case, use .get_ironic_discovery_mock and apply to the
# right location in the mock_uris when calling .register_uris
self.__do_register_uris([
self.get_ironic_discovery_mock_dict()])
self.__do_register_uris([self.get_ironic_discovery_mock_dict()])
def use_senlin(self):
# NOTE(elachance): This method is only meant to be used in "setUp"
@ -586,8 +742,7 @@ class TestCase(base.TestCase):
# if the functionality of .use_senlin is meant to be used during an
# actual test case, use .get_senlin_discovery_mock and apply to the
# right location in the mock_uris when calling .register_uris
self.__do_register_uris([
self.get_senlin_discovery_mock_dict()])
self.__do_register_uris([self.get_senlin_discovery_mock_dict()])
def use_cyborg(self):
# NOTE(s_shogo): This method is only meant to be used in "setUp"
@ -595,8 +750,7 @@ class TestCase(base.TestCase):
# if the functionality of .use_cyborg is meant to be used during an
# actual test case, use .get_cyborg_discovery_mock and apply to the
# right location in the mock_uris when calling .register_uris
self.__do_register_uris([
self.get_cyborg_discovery_mock_dict()])
self.__do_register_uris([self.get_cyborg_discovery_mock_dict()])
def use_manila(self):
# NOTE(gouthamr): This method is only meant to be used in "setUp"
@ -604,8 +758,7 @@ class TestCase(base.TestCase):
# if the functionality of .use_manila is meant to be used during an
# actual test case, use .get_manila_discovery_mock and apply to the
# right location in the mock_uris when calling .register_uris
self.__do_register_uris([
self.get_manila_discovery_mock_dict()])
self.__do_register_uris([self.get_manila_discovery_mock_dict()])
def register_uris(self, uri_mock_list=None):
"""Mock a list of URIs and responses via requests mock.
@ -645,10 +798,11 @@ class TestCase(base.TestCase):
def __do_register_uris(self, uri_mock_list=None):
for to_mock in uri_mock_list:
kw_params = {k: to_mock.pop(k)
for k in ('request_headers', 'complete_qs',
'_real_http')
if k in to_mock}
kw_params = {
k: to_mock.pop(k)
for k in ('request_headers', 'complete_qs', '_real_http')
if k in to_mock
}
method = to_mock.pop('method')
uri = to_mock.pop('uri')
@ -656,44 +810,51 @@ class TestCase(base.TestCase):
# case "|" is used so that the split can be a bit easier on
# maintainers of this code.
key = '{method}|{uri}|{params}'.format(
method=method, uri=uri, params=kw_params)
method=method, uri=uri, params=kw_params
)
validate = to_mock.pop('validate', {})
valid_keys = set(['json', 'headers', 'params', 'data'])
invalid_keys = set(validate.keys()) - valid_keys
if invalid_keys:
raise TypeError(
"Invalid values passed to validate: {keys}".format(
keys=invalid_keys))
headers = structures.CaseInsensitiveDict(to_mock.pop('headers',
{}))
keys=invalid_keys
)
)
headers = structures.CaseInsensitiveDict(
to_mock.pop('headers', {})
)
if 'content-type' not in headers:
headers[u'content-type'] = 'application/json'
if 'exc' not in to_mock:
to_mock['headers'] = headers
self.calls += [
dict(
method=method,
url=uri, **validate)
]
self.calls += [dict(method=method, url=uri, **validate)]
self._uri_registry.setdefault(
key, {'response_list': [], 'kw_params': kw_params})
key, {'response_list': [], 'kw_params': kw_params}
)
if self._uri_registry[key]['kw_params'] != kw_params:
raise AssertionError(
'PROGRAMMING ERROR: key-word-params '
'should be part of the uri_key and cannot change, '
'it will affect the matcher in requests_mock. '
'%(old)r != %(new)r' %
{'old': self._uri_registry[key]['kw_params'],
'new': kw_params})
'%(old)r != %(new)r'
% {
'old': self._uri_registry[key]['kw_params'],
'new': kw_params,
}
)
self._uri_registry[key]['response_list'].append(to_mock)
for mocked, params in self._uri_registry.items():
mock_method, mock_uri, _ignored = mocked.split('|', 2)
self.adapter.register_uri(
mock_method, mock_uri, params['response_list'],
**params['kw_params'])
mock_method,
mock_uri,
params['response_list'],
**params['kw_params']
)
def assert_no_calls(self):
# TODO(mordred) For now, creating the adapter for self.conn is
@ -704,46 +865,65 @@ class TestCase(base.TestCase):
def assert_calls(self, stop_after=None, do_count=True):
for (x, (call, history)) in enumerate(
zip(self.calls, self.adapter.request_history)):
zip(self.calls, self.adapter.request_history)
):
if stop_after and x > stop_after:
break
call_uri_parts = urllib.parse.urlparse(call['url'])
history_uri_parts = urllib.parse.urlparse(history.url)
self.assertEqual(
(call['method'], call_uri_parts.scheme, call_uri_parts.netloc,
call_uri_parts.path, call_uri_parts.params,
urllib.parse.parse_qs(call_uri_parts.query)),
(history.method, history_uri_parts.scheme,
history_uri_parts.netloc, history_uri_parts.path,
history_uri_parts.params,
urllib.parse.parse_qs(history_uri_parts.query)),
('REST mismatch on call %(index)d. Expected %(call)r. '
'Got %(history)r). '
'NOTE: query string order differences wont cause mismatch' %
{
'index': x,
'call': '{method} {url}'.format(method=call['method'],
url=call['url']),
'history': '{method} {url}'.format(
method=history.method,
url=history.url)})
(
call['method'],
call_uri_parts.scheme,
call_uri_parts.netloc,
call_uri_parts.path,
call_uri_parts.params,
urllib.parse.parse_qs(call_uri_parts.query),
),
(
history.method,
history_uri_parts.scheme,
history_uri_parts.netloc,
history_uri_parts.path,
history_uri_parts.params,
urllib.parse.parse_qs(history_uri_parts.query),
),
(
'REST mismatch on call %(index)d. Expected %(call)r. '
'Got %(history)r). '
'NOTE: query string order differences wont cause mismatch'
% {
'index': x,
'call': '{method} {url}'.format(
method=call['method'], url=call['url']
),
'history': '{method} {url}'.format(
method=history.method, url=history.url
),
}
),
)
if 'json' in call:
self.assertEqual(
call['json'], history.json(),
'json content mismatch in call {index}'.format(index=x))
call['json'],
history.json(),
'json content mismatch in call {index}'.format(index=x),
)
# headers in a call isn't exhaustive - it's checking to make sure
# a specific header or headers are there, not that they are the
# only headers
if 'headers' in call:
for key, value in call['headers'].items():
self.assertEqual(
value, history.headers[key],
'header mismatch in call {index}'.format(index=x))
value,
history.headers[key],
'header mismatch in call {index}'.format(index=x),
)
if do_count:
self.assertEqual(
len(self.calls), len(self.adapter.request_history))
len(self.calls), len(self.adapter.request_history)
)
def assertResourceEqual(self, actual, expected, resource_type):
"""Helper for the assertEqual which compares Resource object against
@ -756,7 +936,7 @@ class TestCase(base.TestCase):
"""
return self.assertEqual(
resource_type(**expected).to_dict(computed=False),
actual.to_dict(computed=False)
actual.to_dict(computed=False),
)
def assertResourceListEqual(self, actual, expected, resource_type):
@ -771,12 +951,11 @@ class TestCase(base.TestCase):
"""
self.assertEqual(
[resource_type(**f).to_dict(computed=False) for f in expected],
[f.to_dict(computed=False) for f in actual]
[f.to_dict(computed=False) for f in actual],
)
class IronicTestCase(TestCase):
def setUp(self):
super(IronicTestCase, self).setUp()
self.use_ironic()
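Taken together, the helpers above form a small harness: get_mock_url builds catalog-consistent endpoint URLs, register_uris queues canned responses, and assert_calls verifies the recorded request history afterwards. A minimal sketch of a test built on this TestCase, assuming the usual setUp has run; the empty server list is illustrative, not part of this commit:

class TestServersExample(TestCase):
    def test_list_servers_empty(self):
        self.register_uris(
            [
                # Discovery first, then the actual compute call.
                self.get_nova_discovery_mock_dict(),
                dict(
                    method='GET',
                    uri=self.get_mock_url(
                        'compute', 'public', append=['servers', 'detail']
                    ),
                    json={'servers': []},
                ),
            ]
        )
        self.assertEqual([], self.cloud.list_servers())
        # Every mocked URI must have been hit, in order.
        self.assert_calls()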


@ -706,7 +706,9 @@ class TestCreateServer(base.TestCase):
]
)
self.cloud.create_server(
'server-name', dict(id='image-id'), dict(id='flavor-id'),
'server-name',
dict(id='image-id'),
dict(id='flavor-id'),
wait=True,
),


@ -23,7 +23,6 @@ IDENTIFIER = 'IDENTIFIER'
class TestMetadata(base.TestCase):
def setUp(self):
super(TestMetadata, self).setUp()
@ -95,8 +94,7 @@ class TestMetadata(base.TestCase):
self.assertEqual(res, result)
url = self.base_path + '/' + res.id + '/metadata'
self.session.post.assert_called_once_with(
url,
json={'metadata': {'foo': 'bar'}}
url, json={'metadata': {'foo': 'bar'}}
)
def test_replace_metadata(self):
@ -109,8 +107,7 @@ class TestMetadata(base.TestCase):
self.assertEqual(res, result)
url = self.base_path + '/' + res.id + '/metadata'
self.session.put.assert_called_once_with(
url,
json={'metadata': {'foo': 'bar'}}
url, json={'metadata': {'foo': 'bar'}}
)
def test_delete_all_metadata(self):
@ -125,9 +122,7 @@ class TestMetadata(base.TestCase):
# Check passed resource is returned
self.assertEqual(res, result)
url = self.base_path + '/' + res.id + '/metadata'
self.session.put.assert_called_once_with(
url,
json={'metadata': {}})
self.session.put.assert_called_once_with(url, json={'metadata': {}})
def test_get_metadata_item(self):
res = self.sot
@ -198,5 +193,5 @@ class TestMetadata(base.TestCase):
self.assertEqual(res, result)
url = self.base_path + '/' + res.id + '/metadata/foo'
self.session.put.assert_called_once_with(
url,
json={'meta': {'foo': 'black'}})
url, json={'meta': {'foo': 'black'}}
)
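Each hunk in this commit is mechanical and follows two rules: a call whose arguments fit within the line-length limit (79 columns here) is collapsed onto a single line, while a trailing comma after the last element forces one element per line; quote characters are left untouched. A hedged illustration, where do_thing and its arguments are made-up names rather than code from this repo:

# Collapses: the whole call now fits on one line, as in the
# test_delete_all_metadata hunk above.
self.session.put.assert_called_once_with(url, json={'metadata': {}})

# Explodes: the 'magic' trailing comma after gamma keeps the call
# split one argument per line even though it would fit on one.
result = do_thing(
    alpha,
    beta,
    gamma,
)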


@ -25,26 +25,13 @@ BASIC_EXAMPLE = {
}
USAGE_EXAMPLE = {
"backup_gigabytes": {
"in_use": 0,
"limit": 1000,
"reserved": 0
},
"backups": {
"in_use": 0,
"limit": 10,
"reserved": 0
},
"gigabytes___DEFAULT__": {
"in_use": 0,
"limit": -1,
"reserved": 0
}
"backup_gigabytes": {"in_use": 0, "limit": 1000, "reserved": 0},
"backups": {"in_use": 0, "limit": 10, "reserved": 0},
"gigabytes___DEFAULT__": {"in_use": 0, "limit": -1, "reserved": 0},
}
class TestQuotaSet(base.TestCase):
def setUp(self):
super(TestQuotaSet, self).setUp()
self.sess = mock.Mock(spec=adapter.Adapter)
@ -64,10 +51,9 @@ class TestQuotaSet(base.TestCase):
self.assertTrue(sot.allow_commit)
self.assertDictEqual(
{"usage": "usage",
"limit": "limit",
"marker": "marker"},
sot._query_mapping._mapping)
{"usage": "usage", "limit": "limit", "marker": "marker"},
sot._query_mapping._mapping,
)
def test_make_basic(self):
sot = _qs.QuotaSet(**BASIC_EXAMPLE)
@ -87,10 +73,8 @@ class TestQuotaSet(base.TestCase):
sot.fetch(self.sess)
self.sess.get.assert_called_with(
'/os-quota-sets/proj',
microversion=1,
params={},
skip_cache=False)
'/os-quota-sets/proj', microversion=1, params={}, skip_cache=False
)
self.assertEqual(BASIC_EXAMPLE['backups'], sot.backups)
self.assertEqual({}, sot.reservation)
@ -112,11 +96,10 @@ class TestQuotaSet(base.TestCase):
'/os-quota-sets/proj',
microversion=1,
params={'usage': True},
skip_cache=False)
skip_cache=False,
)
self.assertEqual(
USAGE_EXAMPLE['backups']['limit'],
sot.backups)
self.assertEqual(USAGE_EXAMPLE['backups']['limit'], sot.backups)
def test_update_quota(self):
# Use QuotaSet as if it was returned by get(usage=True)
@ -124,7 +107,8 @@ class TestQuotaSet(base.TestCase):
project_id='proj',
reservation={'a': 'b'},
usage={'c': 'd'},
foo='bar')
foo='bar',
)
resp = mock.Mock()
resp.body = {'quota_set': copy.deepcopy(BASIC_EXAMPLE)}
@ -133,10 +117,7 @@ class TestQuotaSet(base.TestCase):
resp.headers = {}
self.sess.put = mock.Mock(return_value=resp)
sot._update(
reservation={'b': 'd'},
backups=15,
something_else=20)
sot._update(reservation={'b': 'd'}, backups=15, something_else=20)
sot.commit(self.sess)
@ -144,12 +125,8 @@ class TestQuotaSet(base.TestCase):
'/os-quota-sets/proj',
microversion=1,
headers={},
json={
'quota_set': {
'backups': 15,
'something_else': 20
}
})
json={'quota_set': {'backups': 15, 'something_else': 20}},
)
def test_delete_quota(self):
# Use QuotaSet as if it was returned by get(usage=True)
@ -157,7 +134,8 @@ class TestQuotaSet(base.TestCase):
project_id='proj',
reservation={'a': 'b'},
usage={'c': 'd'},
foo='bar')
foo='bar',
)
resp = mock.Mock()
resp.body = None


@ -21,7 +21,6 @@ from openstack.tests.unit.test_resource import FakeResponse
class TestTagMixin(base.TestCase):
def setUp(self):
super(TestTagMixin, self).setUp()
@ -94,10 +93,7 @@ class TestTagMixin(base.TestCase):
# Check the passed resource is returned
self.assertEqual(res, result)
url = self.base_path + '/' + res.id + '/tags'
sess.put.assert_called_once_with(
url,
json={'tags': ['blue', 'green']}
)
sess.put.assert_called_once_with(url, json={'tags': ['blue', 'green']})
def test_remove_all_tags(self):
res = self.sot


@ -48,10 +48,7 @@ USER_CONF = {
'force_ipv4': True,
},
'metrics': {
'statsd': {
'host': '127.0.0.1',
'port': '1234'
},
'statsd': {'host': '127.0.0.1', 'port': '1234'},
'influxdb': {
'host': '127.0.0.1',
'port': '1234',
@ -61,7 +58,7 @@ USER_CONF = {
'database': 'database',
'measurement': 'measurement.name',
'timeout': 10,
}
},
},
'clouds': {
'_test-cloud_': {
@ -112,30 +109,37 @@ USER_CONF = {
'domain_id': '6789',
'project_domain_id': '123456789',
},
'networks': [{
'name': 'a-public',
'routes_externally': True,
'nat_source': True,
}, {
'name': 'another-public',
'routes_externally': True,
'default_interface': True,
}, {
'name': 'a-private',
'routes_externally': False,
}, {
'name': 'another-private',
'routes_externally': False,
'nat_destination': True,
}, {
'name': 'split-default',
'routes_externally': True,
'routes_ipv4_externally': False,
}, {
'name': 'split-no-default',
'routes_ipv6_externally': False,
'routes_ipv4_externally': True,
}],
'networks': [
{
'name': 'a-public',
'routes_externally': True,
'nat_source': True,
},
{
'name': 'another-public',
'routes_externally': True,
'default_interface': True,
},
{
'name': 'a-private',
'routes_externally': False,
},
{
'name': 'another-private',
'routes_externally': False,
'nat_destination': True,
},
{
'name': 'split-default',
'routes_externally': True,
'routes_ipv4_externally': False,
},
{
'name': 'split-no-default',
'routes_ipv6_externally': False,
'routes_ipv4_externally': True,
},
],
'region_name': 'test-region',
},
'_test_cloud_regions': {
@ -150,13 +154,13 @@ USER_CONF = {
'name': 'region1',
'values': {
'external_network': 'region1-network',
}
},
},
{
'name': 'region2',
'values': {
'external_network': 'my-network',
}
},
},
{
'name': 'region-no-value',
@ -198,13 +202,13 @@ USER_CONF = {
'statsd': {
'host': '127.0.0.1',
'port': 4321,
'prefix': 'statsd.override.prefix'
'prefix': 'statsd.override.prefix',
},
'influxdb': {
'username': 'override-username',
'password': 'override-password',
'database': 'override-database',
}
},
},
},
},


@ -40,7 +40,6 @@ fake_services_dict = {
class TestCloudRegion(base.TestCase):
def test_arbitrary_attributes(self):
cc = cloud_region.CloudRegion("test1", "region-al", fake_config_dict)
self.assertEqual("test1", cc.name)
@ -89,12 +88,10 @@ class TestCloudRegion(base.TestCase):
self.assertIsNone(cc._get_config('nothing', None))
# This is what is happening behind the scenes in get_default_interface.
self.assertEqual(
fake_services_dict['interface'],
cc._get_config('interface', None))
fake_services_dict['interface'], cc._get_config('interface', None)
)
# The same call as above, but from one step up the stack
self.assertEqual(
fake_services_dict['interface'],
cc.get_interface())
self.assertEqual(fake_services_dict['interface'], cc.get_interface())
# Which finally is what is called to populate the below
self.assertEqual('public', self.cloud.default_interface)
@ -150,16 +147,21 @@ class TestCloudRegion(base.TestCase):
def test_ipv6(self):
cc = cloud_region.CloudRegion(
"test1", "region-al", fake_config_dict, force_ipv4=True)
"test1", "region-al", fake_config_dict, force_ipv4=True
)
self.assertTrue(cc.force_ipv4)
def test_getters(self):
cc = cloud_region.CloudRegion("test1", "region-al", fake_services_dict)
self.assertEqual(['compute', 'identity', 'image', 'volume'],
sorted(cc.get_services()))
self.assertEqual({'password': 'hunter2', 'username': 'AzureDiamond'},
cc.get_auth_args())
self.assertEqual(
['compute', 'identity', 'image', 'volume'],
sorted(cc.get_services()),
)
self.assertEqual(
{'password': 'hunter2', 'username': 'AzureDiamond'},
cc.get_auth_args(),
)
self.assertEqual('public', cc.get_interface())
self.assertEqual('public', cc.get_interface('compute'))
self.assertEqual('admin', cc.get_interface('identity'))
@ -170,8 +172,9 @@ class TestCloudRegion(base.TestCase):
self.assertEqual('compute', cc.get_service_type('compute'))
self.assertEqual('1', cc.get_api_version('volume'))
self.assertEqual('block-storage', cc.get_service_type('volume'))
self.assertEqual('http://compute.example.com',
cc.get_endpoint('compute'))
self.assertEqual(
'http://compute.example.com', cc.get_endpoint('compute')
)
self.assertIsNone(cc.get_endpoint('image'))
self.assertIsNone(cc.get_service_name('compute'))
self.assertEqual('locks', cc.get_service_name('identity'))
@ -184,38 +187,45 @@ class TestCloudRegion(base.TestCase):
# We're skipping loader here, so we have to expand relevant
# parts from the rackspace profile. The thing we're testing
# is that the project_id logic works.
cc = cloud_region.CloudRegion("test1", "DFW", {
'profile': 'rackspace',
'region_name': 'DFW',
'auth': {'project_id': '123456'},
'block_storage_endpoint_override': 'https://example.com/v2/',
})
cc = cloud_region.CloudRegion(
"test1",
"DFW",
{
'profile': 'rackspace',
'region_name': 'DFW',
'auth': {'project_id': '123456'},
'block_storage_endpoint_override': 'https://example.com/v2/',
},
)
self.assertEqual(
'https://example.com/v2/123456',
cc.get_endpoint('block-storage')
'https://example.com/v2/123456', cc.get_endpoint('block-storage')
)
def test_rackspace_workaround_only_rax(self):
cc = cloud_region.CloudRegion("test1", "DFW", {
'region_name': 'DFW',
'auth': {'project_id': '123456'},
'block_storage_endpoint_override': 'https://example.com/v2/',
})
cc = cloud_region.CloudRegion(
"test1",
"DFW",
{
'region_name': 'DFW',
'auth': {'project_id': '123456'},
'block_storage_endpoint_override': 'https://example.com/v2/',
},
)
self.assertEqual(
'https://example.com/v2/',
cc.get_endpoint('block-storage')
'https://example.com/v2/', cc.get_endpoint('block-storage')
)
def test_get_region_name(self):
def assert_region_name(default, compute):
self.assertEqual(default, cc.region_name)
self.assertEqual(default, cc.get_region_name())
self.assertEqual(default, cc.get_region_name(service_type=None))
self.assertEqual(
compute, cc.get_region_name(service_type='compute'))
compute, cc.get_region_name(service_type='compute')
)
self.assertEqual(
default, cc.get_region_name(service_type='placement'))
default, cc.get_region_name(service_type='placement')
)
# No region_name kwarg, no regions specified in services dict
# (including the default).
@ -224,14 +234,17 @@ class TestCloudRegion(base.TestCase):
# Only region_name kwarg; it's returned for everything
cc = cloud_region.CloudRegion(
region_name='foo', config=fake_services_dict)
region_name='foo', config=fake_services_dict
)
assert_region_name('foo', 'foo')
# No region_name kwarg; values (including default) show through from
# config dict
services_dict = dict(
fake_services_dict,
region_name='the-default', compute_region_name='compute-region')
region_name='the-default',
compute_region_name='compute-region',
)
cc = cloud_region.CloudRegion(config=services_dict)
assert_region_name('the-default', 'compute-region')
@ -239,9 +252,12 @@ class TestCloudRegion(base.TestCase):
# compatibility), but service-specific region_name takes precedence.
services_dict = dict(
fake_services_dict,
region_name='dict', compute_region_name='compute-region')
region_name='dict',
compute_region_name='compute-region',
)
cc = cloud_region.CloudRegion(
region_name='kwarg', config=services_dict)
region_name='kwarg', config=services_dict
)
assert_region_name('kwarg', 'compute-region')
def test_aliases(self):
@ -265,9 +281,7 @@ class TestCloudRegion(base.TestCase):
config_dict = defaults.get_defaults()
config_dict.update(fake_services_dict)
cc = cloud_region.CloudRegion("test1", "region-al", config_dict)
self.assertRaises(
exceptions.ConfigException,
cc.get_session)
self.assertRaises(exceptions.ConfigException, cc.get_session)
@mock.patch.object(ksa_session, 'Session')
def test_get_session(self, mock_session):
@ -277,15 +291,21 @@ class TestCloudRegion(base.TestCase):
fake_session.additional_user_agent = []
mock_session.return_value = fake_session
cc = cloud_region.CloudRegion(
"test1", "region-al", config_dict, auth_plugin=mock.Mock())
"test1", "region-al", config_dict, auth_plugin=mock.Mock()
)
cc.get_session()
mock_session.assert_called_with(
auth=mock.ANY,
verify=True, cert=None, timeout=None, collect_timing=None,
discovery_cache=None)
verify=True,
cert=None,
timeout=None,
collect_timing=None,
discovery_cache=None,
)
self.assertEqual(
fake_session.additional_user_agent,
[('openstacksdk', openstack_version.__version__)])
[('openstacksdk', openstack_version.__version__)],
)
@mock.patch.object(ksa_session, 'Session')
def test_get_session_with_app_name(self, mock_session):
@ -297,18 +317,28 @@ class TestCloudRegion(base.TestCase):
fake_session.app_version = None
mock_session.return_value = fake_session
cc = cloud_region.CloudRegion(
"test1", "region-al", config_dict, auth_plugin=mock.Mock(),
app_name="test_app", app_version="test_version")
"test1",
"region-al",
config_dict,
auth_plugin=mock.Mock(),
app_name="test_app",
app_version="test_version",
)
cc.get_session()
mock_session.assert_called_with(
auth=mock.ANY,
verify=True, cert=None, timeout=None, collect_timing=None,
discovery_cache=None)
verify=True,
cert=None,
timeout=None,
collect_timing=None,
discovery_cache=None,
)
self.assertEqual(fake_session.app_name, "test_app")
self.assertEqual(fake_session.app_version, "test_version")
self.assertEqual(
fake_session.additional_user_agent,
[('openstacksdk', openstack_version.__version__)])
[('openstacksdk', openstack_version.__version__)],
)
@mock.patch.object(ksa_session, 'Session')
def test_get_session_with_timeout(self, mock_session):
@ -319,15 +349,21 @@ class TestCloudRegion(base.TestCase):
config_dict.update(fake_services_dict)
config_dict['api_timeout'] = 9
cc = cloud_region.CloudRegion(
"test1", "region-al", config_dict, auth_plugin=mock.Mock())
"test1", "region-al", config_dict, auth_plugin=mock.Mock()
)
cc.get_session()
mock_session.assert_called_with(
auth=mock.ANY,
verify=True, cert=None, timeout=9,
collect_timing=None, discovery_cache=None)
verify=True,
cert=None,
timeout=9,
collect_timing=None,
discovery_cache=None,
)
self.assertEqual(
fake_session.additional_user_agent,
[('openstacksdk', openstack_version.__version__)])
[('openstacksdk', openstack_version.__version__)],
)
@mock.patch.object(ksa_session, 'Session')
def test_get_session_with_timing(self, mock_session):
@ -338,35 +374,45 @@ class TestCloudRegion(base.TestCase):
config_dict.update(fake_services_dict)
config_dict['timing'] = True
cc = cloud_region.CloudRegion(
"test1", "region-al", config_dict, auth_plugin=mock.Mock())
"test1", "region-al", config_dict, auth_plugin=mock.Mock()
)
cc.get_session()
mock_session.assert_called_with(
auth=mock.ANY,
verify=True, cert=None, timeout=None,
collect_timing=True, discovery_cache=None)
verify=True,
cert=None,
timeout=None,
collect_timing=True,
discovery_cache=None,
)
self.assertEqual(
fake_session.additional_user_agent,
[('openstacksdk', openstack_version.__version__)])
[('openstacksdk', openstack_version.__version__)],
)
@mock.patch.object(ksa_session, 'Session')
def test_override_session_endpoint_override(self, mock_session):
config_dict = defaults.get_defaults()
config_dict.update(fake_services_dict)
cc = cloud_region.CloudRegion(
"test1", "region-al", config_dict, auth_plugin=mock.Mock())
"test1", "region-al", config_dict, auth_plugin=mock.Mock()
)
self.assertEqual(
cc.get_session_endpoint('compute'),
fake_services_dict['compute_endpoint_override'])
fake_services_dict['compute_endpoint_override'],
)
@mock.patch.object(ksa_session, 'Session')
def test_override_session_endpoint(self, mock_session):
config_dict = defaults.get_defaults()
config_dict.update(fake_services_dict)
cc = cloud_region.CloudRegion(
"test1", "region-al", config_dict, auth_plugin=mock.Mock())
"test1", "region-al", config_dict, auth_plugin=mock.Mock()
)
self.assertEqual(
cc.get_session_endpoint('telemetry'),
fake_services_dict['telemetry_endpoint'])
fake_services_dict['telemetry_endpoint'],
)
@mock.patch.object(cloud_region.CloudRegion, 'get_session')
def test_session_endpoint(self, mock_get_session):
@ -375,20 +421,23 @@ class TestCloudRegion(base.TestCase):
config_dict = defaults.get_defaults()
config_dict.update(fake_services_dict)
cc = cloud_region.CloudRegion(
"test1", "region-al", config_dict, auth_plugin=mock.Mock())
"test1", "region-al", config_dict, auth_plugin=mock.Mock()
)
cc.get_session_endpoint('orchestration')
mock_session.get_endpoint.assert_called_with(
interface='public',
service_name=None,
region_name='region-al',
service_type='orchestration')
service_type='orchestration',
)
@mock.patch.object(cloud_region.CloudRegion, 'get_session')
def test_session_endpoint_not_found(self, mock_get_session):
exc_to_raise = ksa_exceptions.catalog.EndpointNotFound
mock_get_session.return_value.get_endpoint.side_effect = exc_to_raise
cc = cloud_region.CloudRegion(
"test1", "region-al", {}, auth_plugin=mock.Mock())
"test1", "region-al", {}, auth_plugin=mock.Mock()
)
self.assertIsNone(cc.get_session_endpoint('notfound'))
def test_get_endpoint_from_catalog(self):
@ -396,14 +445,20 @@ class TestCloudRegion(base.TestCase):
self.cloud.config.config['dns_endpoint_override'] = dns_override
self.assertEqual(
'https://compute.example.com/v2.1/',
self.cloud.config.get_endpoint_from_catalog('compute'))
self.cloud.config.get_endpoint_from_catalog('compute'),
)
self.assertEqual(
'https://internal.compute.example.com/v2.1/',
self.cloud.config.get_endpoint_from_catalog(
'compute', interface='internal'))
'compute', interface='internal'
),
)
self.assertIsNone(
self.cloud.config.get_endpoint_from_catalog(
'compute', region_name='unknown-region'))
'compute', region_name='unknown-region'
)
)
self.assertEqual(
'https://dns.example.com',
self.cloud.config.get_endpoint_from_catalog('dns'))
self.cloud.config.get_endpoint_from_catalog('dns'),
)
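The test_get_region_name cases above pin down a precedence order worth stating once: an explicit region_name kwarg becomes the default for every service, but a per-service '<service>_region_name' key in the config dict still wins for that service. A hedged distillation, reusing the module's fake_services_dict fixture:

# region_name kwarg sets the default; compute_region_name overrides
# it for the compute service only.
services_dict = dict(
    fake_services_dict,
    region_name='dict',
    compute_region_name='compute-region',
)
cc = cloud_region.CloudRegion(region_name='kwarg', config=services_dict)
assert cc.get_region_name() == 'kwarg'
assert cc.get_region_name(service_type='compute') == 'compute-region'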

File diff suppressed because it is too large


@ -21,52 +21,64 @@ from openstack.tests.unit.config import base
class TestEnviron(base.TestCase):
def setUp(self):
super(TestEnviron, self).setUp()
self.useFixture(
fixtures.EnvironmentVariable('OS_AUTH_URL', 'https://example.com'))
fixtures.EnvironmentVariable('OS_AUTH_URL', 'https://example.com')
)
self.useFixture(
fixtures.EnvironmentVariable('OS_USERNAME', 'testuser'))
fixtures.EnvironmentVariable('OS_USERNAME', 'testuser')
)
self.useFixture(
fixtures.EnvironmentVariable('OS_PASSWORD', 'testpass'))
fixtures.EnvironmentVariable('OS_PASSWORD', 'testpass')
)
self.useFixture(
fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'testproject'))
fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'testproject')
)
self.useFixture(
fixtures.EnvironmentVariable('NOVA_PROJECT_ID', 'testnova'))
fixtures.EnvironmentVariable('NOVA_PROJECT_ID', 'testnova')
)
def test_get_one(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
c = config.OpenStackConfig(
config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml]
)
self.assertIsInstance(c.get_one(), cloud_region.CloudRegion)
def test_no_fallthrough(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
self.assertRaises(
exceptions.ConfigException, c.get_one, 'openstack')
c = config.OpenStackConfig(
config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml]
)
self.assertRaises(exceptions.ConfigException, c.get_one, 'openstack')
def test_envvar_name_override(self):
self.useFixture(
fixtures.EnvironmentVariable('OS_CLOUD_NAME', 'override'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
fixtures.EnvironmentVariable('OS_CLOUD_NAME', 'override')
)
c = config.OpenStackConfig(
config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml]
)
cc = c.get_one('override')
self._assert_cloud_details(cc)
def test_envvar_prefer_ipv6_override(self):
self.useFixture(
fixtures.EnvironmentVariable('OS_PREFER_IPV6', 'false'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
secure_files=[self.secure_yaml])
fixtures.EnvironmentVariable('OS_PREFER_IPV6', 'false')
)
c = config.OpenStackConfig(
config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
secure_files=[self.secure_yaml],
)
cc = c.get_one('_test-cloud_')
self.assertFalse(cc.prefer_ipv6)
def test_environ_exists(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
secure_files=[self.secure_yaml])
c = config.OpenStackConfig(
config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
secure_files=[self.secure_yaml],
)
cc = c.get_one('envvars')
self._assert_cloud_details(cc)
self.assertNotIn('auth_url', cc.config)
@ -79,10 +91,12 @@ class TestEnviron(base.TestCase):
self._assert_cloud_details(cc)
def test_environ_prefix(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
envvar_prefix='NOVA_',
secure_files=[self.secure_yaml])
c = config.OpenStackConfig(
config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
envvar_prefix='NOVA_',
secure_files=[self.secure_yaml],
)
cc = c.get_one('envvars')
self._assert_cloud_details(cc)
self.assertNotIn('auth_url', cc.config)
@ -95,9 +109,11 @@ class TestEnviron(base.TestCase):
self._assert_cloud_details(cc)
def test_get_one_with_config_files(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
secure_files=[self.secure_yaml])
c = config.OpenStackConfig(
config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
secure_files=[self.secure_yaml],
)
self.assertIsInstance(c.cloud_config, dict)
self.assertIn('cache', c.cloud_config)
self.assertIsInstance(c.cloud_config['cache'], dict)
@ -111,40 +127,40 @@ class TestEnviron(base.TestCase):
def test_config_file_override(self):
self.useFixture(
fixtures.EnvironmentVariable(
'OS_CLIENT_CONFIG_FILE', self.cloud_yaml))
c = config.OpenStackConfig(config_files=[],
vendor_files=[self.vendor_yaml])
'OS_CLIENT_CONFIG_FILE', self.cloud_yaml
)
)
c = config.OpenStackConfig(
config_files=[], vendor_files=[self.vendor_yaml]
)
cc = c.get_one('_test-cloud_')
self._assert_cloud_details(cc)
class TestEnvvars(base.TestCase):
def test_no_envvars(self):
self.useFixture(
fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
self.assertRaises(
exceptions.ConfigException, c.get_one, 'envvars')
self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
c = config.OpenStackConfig(
config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml]
)
self.assertRaises(exceptions.ConfigException, c.get_one, 'envvars')
def test_test_envvars(self):
self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
self.useFixture(
fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
self.useFixture(
fixtures.EnvironmentVariable('OS_STDERR_CAPTURE', 'True'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
self.assertRaises(
exceptions.ConfigException, c.get_one, 'envvars')
fixtures.EnvironmentVariable('OS_STDERR_CAPTURE', 'True')
)
c = config.OpenStackConfig(
config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml]
)
self.assertRaises(exceptions.ConfigException, c.get_one, 'envvars')
def test_incomplete_envvars(self):
self.useFixture(
fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
self.useFixture(
fixtures.EnvironmentVariable('OS_USERNAME', 'user'))
config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
self.useFixture(fixtures.EnvironmentVariable('OS_USERNAME', 'user'))
config.OpenStackConfig(
config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml]
)
# This is broken due to an issue that's fixed in a subsequent patch
# commenting it out in this patch to keep the patch size reasonable
# self.assertRaises(
@ -152,33 +168,38 @@ class TestEnvvars(base.TestCase):
# c.get_one, 'envvars')
def test_have_envvars(self):
self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
self.useFixture(
fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
fixtures.EnvironmentVariable('OS_AUTH_URL', 'http://example.com')
)
self.useFixture(fixtures.EnvironmentVariable('OS_USERNAME', 'user'))
self.useFixture(
fixtures.EnvironmentVariable('OS_AUTH_URL', 'http://example.com'))
fixtures.EnvironmentVariable('OS_PASSWORD', 'password')
)
self.useFixture(
fixtures.EnvironmentVariable('OS_USERNAME', 'user'))
self.useFixture(
fixtures.EnvironmentVariable('OS_PASSWORD', 'password'))
self.useFixture(
fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'project'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'project')
)
c = config.OpenStackConfig(
config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml]
)
cc = c.get_one('envvars')
self.assertEqual(cc.config['auth']['username'], 'user')
def test_old_envvars(self):
self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
self.useFixture(
fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
fixtures.EnvironmentVariable('NOVA_AUTH_URL', 'http://example.com')
)
self.useFixture(
fixtures.EnvironmentVariable(
'NOVA_AUTH_URL', 'http://example.com'))
fixtures.EnvironmentVariable('NOVA_PASSWORD', 'password')
)
self.useFixture(
fixtures.EnvironmentVariable('NOVA_PASSWORD', 'password'))
self.useFixture(
fixtures.EnvironmentVariable('NOVA_PROJECT_NAME', 'project'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
envvar_prefix='NOVA_')
fixtures.EnvironmentVariable('NOVA_PROJECT_NAME', 'project')
)
c = config.OpenStackConfig(
config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
envvar_prefix='NOVA_',
)
cc = c.get_one('envvars')
self.assertEqual(cc.config['auth']['username'], 'nova')
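test_old_envvars above is the clearest statement of what envvar_prefix does: the synthetic 'envvars' cloud is assembled from NOVA_*-prefixed variables instead of the usual OS_* ones. A standalone sketch, assuming an otherwise clean environment with no clouds.yaml in the default locations:

import os

from openstack import config

os.environ['NOVA_AUTH_URL'] = 'http://example.com'
os.environ['NOVA_USERNAME'] = 'nova'
os.environ['NOVA_PASSWORD'] = 'password'
os.environ['NOVA_PROJECT_NAME'] = 'project'

# With the prefix set, NOVA_* is harvested in place of OS_*.
c = config.OpenStackConfig(config_files=[], envvar_prefix='NOVA_')
cc = c.get_one('envvars')
assert cc.config['auth']['username'] == 'nova'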


@ -23,13 +23,15 @@ from openstack.tests.unit import base
class TestFromConf(base.TestCase):
def _get_conn(self, **from_conf_kwargs):
oslocfg = self._load_ks_cfg_opts()
# Throw name in here to prove **kwargs is working
config = cloud_region.from_conf(
oslocfg, session=self.cloud.session, name='from_conf.example.com',
**from_conf_kwargs)
oslocfg,
session=self.cloud.session,
name='from_conf.example.com',
**from_conf_kwargs
)
self.assertEqual('from_conf.example.com', config.name)
return connection.Connection(config=config, strict_proxies=True)
@ -41,33 +43,48 @@ class TestFromConf(base.TestCase):
discovery = {
"versions": {
"values": [
{"status": "stable",
"updated": "2019-06-01T00:00:00Z",
"media-types": [{
"base": "application/json",
"type": "application/vnd.openstack.heat-v2+json"}],
"id": "v2.0",
"links": [{
"href": "https://example.org:8888/heat/v2",
"rel": "self"}]
}]
{
"status": "stable",
"updated": "2019-06-01T00:00:00Z",
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.heat-v2+json", # noqa: E501
}
],
"id": "v2.0",
"links": [
{
"href": "https://example.org:8888/heat/v2",
"rel": "self",
}
],
}
]
}
}
self.register_uris([
dict(method='GET',
uri='https://example.org:8888/heat/v2',
json=discovery),
dict(method='GET',
uri='https://example.org:8888/heat/v2/foo',
json={'foo': {}}),
])
self.register_uris(
[
dict(
method='GET',
uri='https://example.org:8888/heat/v2',
json=discovery,
),
dict(
method='GET',
uri='https://example.org:8888/heat/v2/foo',
json={'foo': {}},
),
]
)
adap = conn.orchestration
self.assertEqual('SpecialRegion', adap.region_name)
self.assertEqual('orchestration', adap.service_type)
self.assertEqual('internal', adap.interface)
self.assertEqual('https://example.org:8888/heat/v2',
adap.endpoint_override)
self.assertEqual(
'https://example.org:8888/heat/v2', adap.endpoint_override
)
adap.get('/foo')
self.assert_calls()
@ -80,13 +97,18 @@ class TestFromConf(base.TestCase):
server_name = self.getUniqueString('name')
fake_server = fakes.make_fake_server(server_id, server_name)
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [fake_server]}),
])
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']
),
json={'servers': [fake_server]},
),
]
)
# Nova has empty adapter config, so these default
adap = conn.compute
@ -108,20 +130,27 @@ class TestFromConf(base.TestCase):
server_name = self.getUniqueString('name')
fake_server = fakes.make_fake_server(server_id, server_name)
self.register_uris([
dict(method='GET',
uri='https://compute.example.com/v2.1/',
exc=requests.exceptions.ConnectionError),
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [fake_server]}),
])
self.register_uris(
[
dict(
method='GET',
uri='https://compute.example.com/v2.1/',
exc=requests.exceptions.ConnectionError,
),
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']
),
json={'servers': [fake_server]},
),
]
)
self.assertRaises(
exceptions.ServiceDiscoveryException,
getattr, conn, 'compute')
exceptions.ServiceDiscoveryException, getattr, conn, 'compute'
)
# Nova has empty adapter config, so these default
adap = conn.compute
@ -141,31 +170,41 @@ class TestFromConf(base.TestCase):
discovery = {
"versions": {
"values": [
{"status": "stable",
"id": "v1",
"links": [{
"href": "https://example.org:5050/v1",
"rel": "self"}]
}]
{
"status": "stable",
"id": "v1",
"links": [
{
"href": "https://example.org:5050/v1",
"rel": "self",
}
],
}
]
}
}
status = {
'finished': True,
'error': None
}
self.register_uris([
dict(method='GET',
uri='https://example.org:5050',
json=discovery),
# strict-proxies means we're going to fetch the discovery
# doc from the versioned endpoint to verify it works.
dict(method='GET',
uri='https://example.org:5050/v1',
json=discovery),
dict(method='GET',
uri='https://example.org:5050/v1/introspection/abcd',
json=status),
])
status = {'finished': True, 'error': None}
self.register_uris(
[
dict(
method='GET',
uri='https://example.org:5050',
json=discovery,
),
# strict-proxies means we're going to fetch the discovery
# doc from the versioned endpoint to verify it works.
dict(
method='GET',
uri='https://example.org:5050/v1',
json=discovery,
),
dict(
method='GET',
uri='https://example.org:5050/v1/introspection/abcd',
json=status,
),
]
)
adap = conn.baremetal_introspection
self.assertEqual('baremetal-introspection', adap.service_type)
@ -180,38 +219,53 @@ class TestFromConf(base.TestCase):
discovery = {
"versions": {
"values": [
{"status": "stable",
"id": "v1",
"links": [{
"href": "https://example.org:5050/v1",
"rel": "self"}]
}]
{
"status": "stable",
"id": "v1",
"links": [
{
"href": "https://example.org:5050/v1",
"rel": "self",
}
],
}
]
}
}
status = {
'finished': True,
'error': None
}
self.register_uris([
dict(method='GET',
uri='https://example.org:5050',
exc=requests.exceptions.ConnectTimeout),
dict(method='GET',
uri='https://example.org:5050',
json=discovery),
# strict-proxies means we're going to fetch the discovery
# doc from the versioned endpoint to verify it works.
dict(method='GET',
uri='https://example.org:5050/v1',
json=discovery),
dict(method='GET',
uri='https://example.org:5050/v1/introspection/abcd',
json=status),
])
status = {'finished': True, 'error': None}
self.register_uris(
[
dict(
method='GET',
uri='https://example.org:5050',
exc=requests.exceptions.ConnectTimeout,
),
dict(
method='GET',
uri='https://example.org:5050',
json=discovery,
),
# strict-proxies means we're going to fetch the discovery
# doc from the versioned endpoint to verify it works.
dict(
method='GET',
uri='https://example.org:5050/v1',
json=discovery,
),
dict(
method='GET',
uri='https://example.org:5050/v1/introspection/abcd',
json=status,
),
]
)
self.assertRaises(
exceptions.ServiceDiscoveryException,
getattr, conn, 'baremetal_introspection')
getattr,
conn,
'baremetal_introspection',
)
adap = conn.baremetal_introspection
self.assertEqual('baremetal-introspection', adap.service_type)
@ -220,16 +274,21 @@ class TestFromConf(base.TestCase):
self.assertTrue(adap.get_introspection('abcd').is_finished)
def assert_service_disabled(self, service_type, expected_reason,
**from_conf_kwargs):
def assert_service_disabled(
self, service_type, expected_reason, **from_conf_kwargs
):
conn = self._get_conn(**from_conf_kwargs)
# The _ServiceDisabledProxyShim loads up okay...
adap = getattr(conn, service_type)
# ...but freaks out if you try to use it.
ex = self.assertRaises(
exceptions.ServiceDisabledException, getattr, adap, 'get')
self.assertIn("Service '%s' is disabled because its configuration "
"could not be loaded." % service_type, ex.message)
exceptions.ServiceDisabledException, getattr, adap, 'get'
)
self.assertIn(
"Service '%s' is disabled because its configuration "
"could not be loaded." % service_type,
ex.message,
)
self.assertIn(expected_reason, ex.message)
def test_no_such_conf_section(self):
@ -238,15 +297,18 @@ class TestFromConf(base.TestCase):
self.assert_service_disabled(
'orchestration',
"No section for project 'heat' (service type 'orchestration') was "
"present in the config.")
"present in the config.",
)
def test_no_such_conf_section_ignore_service_type(self):
"""Ignore absent conf section if service type not requested."""
del self.oslo_config_dict['heat']
self.assert_service_disabled(
'orchestration', "Not in the list of requested service_types.",
'orchestration',
"Not in the list of requested service_types.",
# 'orchestration' absent from this list
service_types=['compute'])
service_types=['compute'],
)
def test_no_adapter_opts(self):
"""Conf section present, but opts for service type not registered."""
@ -254,15 +316,18 @@ class TestFromConf(base.TestCase):
self.assert_service_disabled(
'orchestration',
"Encountered an exception attempting to process config for "
"project 'heat' (service type 'orchestration'): no such option")
"project 'heat' (service type 'orchestration'): no such option",
)
def test_no_adapter_opts_ignore_service_type(self):
"""Ignore unregistered conf section if service type not requested."""
self.oslo_config_dict['heat'] = None
self.assert_service_disabled(
'orchestration', "Not in the list of requested service_types.",
'orchestration',
"Not in the list of requested service_types.",
# 'orchestration' absent from this list
service_types=['compute'])
service_types=['compute'],
)
def test_invalid_adapter_opts(self):
"""Adapter opts are bogus, in exception-raising ways."""
@@ -274,24 +339,31 @@ class TestFromConf(base.TestCase):
'orchestration',
"Encountered an exception attempting to process config for "
"project 'heat' (service type 'orchestration'): interface and "
"valid_interfaces are mutually exclusive.")
"valid_interfaces are mutually exclusive.",
)
def test_no_session(self):
# TODO(efried): Currently calling without a Session is not implemented.
self.assertRaises(
exceptions.ConfigException,
cloud_region.from_conf,
self._load_ks_cfg_opts(),
)
def test_no_endpoint(self):
"""Conf contains adapter opts, but service type not in catalog."""
self.os_fixture.v3_token.remove_service('monitoring')
conn = self._get_conn()
# Monasca is not in the service catalog
self.assertRaises(
ks_exc.catalog.EndpointNotFound, getattr, conn, 'monitoring'
)
def test_no_endpoint_ignore_service_type(self):
"""Bogus service type disabled if not in requested service_types."""
self.assert_service_disabled(
'monitoring', "Not in the list of requested service_types.",
'monitoring',
"Not in the list of requested service_types.",
# 'monitoring' absent from this list
service_types={'compute', 'orchestration', 'bogus'},
)

View File

@@ -30,7 +30,8 @@ class TestFromSession(base.TestCase):
def test_from_session(self):
config = cloud_region.from_session(
self.cloud.session, region_name=self.test_region
)
self.assertEqual(config.name, 'identity.example.com')
if not self.test_region:
self.assertIsNone(config.region_name)
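# The default name appears to be derived from the host of the
# session's auth URL, hence 'identity.example.com' in these fixtures.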
@@ -40,13 +41,18 @@
server_id = str(uuid.uuid4())
server_name = self.getUniqueString('name')
fake_server = fakes.make_fake_server(server_id, server_name)
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']
),
json={'servers': [fake_server]},
),
]
)
conn = connection.Connection(config=config)
s = next(conn.compute.servers())
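# compute.servers() returns a generator; calling next() is what
# actually issues the mocked discovery and 'servers/detail' requests
# registered above.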

View File

@@ -19,17 +19,16 @@ from openstack.tests.unit.config import base
class TestInit(base.TestCase):
def test_get_cloud_region_without_arg_parser(self):
cloud_region = openstack.config.get_cloud_region(
options=None, validate=False
)
self.assertIsInstance(
cloud_region, openstack.config.cloud_region.CloudRegion
)
def test_get_cloud_region_with_arg_parser(self):
cloud_region = openstack.config.get_cloud_region(
options=argparse.ArgumentParser(), validate=False
)
self.assertIsInstance(
cloud_region, openstack.config.cloud_region.CloudRegion
)
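# Outside of tests the same entry point is usually given a named cloud
# (hypothetical profile name shown):
#
#   region = openstack.config.get_cloud_region(cloud='my-cloud')
#   conn = openstack.connection.Connection(config=region)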

View File

@@ -24,7 +24,6 @@ from openstack.tests.unit.config import base
class TestConfig(base.TestCase):
def json_diagnostics(self, exc_info):
self.addDetail('filename', content.text_content(self.filename))
for error in sorted(self.validator.iter_errors(self.json_data)):
@@ -32,8 +31,8 @@ class TestConfig(base.TestCase):
def test_defaults_valid_json(self):
_schema_path = os.path.join(
os.path.dirname(os.path.realpath(defaults.__file__)), 'schema.json'
)
with open(_schema_path, 'r') as f:
schema = json.load(f)
self.validator = jsonschema.Draft4Validator(schema)
@@ -41,7 +40,8 @@ class TestConfig(base.TestCase):
self.filename = os.path.join(
os.path.dirname(os.path.realpath(defaults.__file__)),
'defaults.json',
)
with open(self.filename, 'r') as f:
self.json_data = json.load(f)
@@ -50,7 +50,8 @@ class TestConfig(base.TestCase):
def test_vendors_valid_json(self):
_schema_path = os.path.join(
os.path.dirname(os.path.realpath(defaults.__file__)),
'vendor-schema.json',
)
with open(_schema_path, 'r') as f:
schema = json.load(f)
self.validator = jsonschema.Draft4Validator(schema)
@@ -58,8 +59,8 @@ class TestConfig(base.TestCase):
self.addOnException(self.json_diagnostics)
_vendors_path = os.path.join(
os.path.dirname(os.path.realpath(defaults.__file__)), 'vendors'
)
for self.filename in glob.glob(os.path.join(_vendors_path, '*.json')):
with open(self.filename, 'r') as f:
self.json_data = json.load(f)

View File

@@ -21,14 +21,17 @@ from openstack import exceptions
from openstack.tests.unit.config import base
FILES = {
'yaml': textwrap.dedent(
'''
foo: bar
baz:
- 1
- 2
- 3
'''
),
'json': textwrap.dedent(
'''
{
"foo": "bar",
"baz": [
@@ -37,18 +40,20 @@ FILES = {
3
]
}
'''
),
'txt': textwrap.dedent(
'''
foo
bar baz
test
one two
'''
),
}
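# NOTE: each fixture above is written to a temporary file by the tests
# below; the assertions then compare the returned path rather than the
# parsed contents, because FILES is a dict and its iteration order is
# not guaranteed to be meaningful here.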
class TestLoader(base.TestCase):
def test_base_load_yaml_json_file(self):
with tempfile.TemporaryDirectory() as tmpdir:
tested_files = []
@@ -59,7 +64,8 @@ class TestLoader(base.TestCase):
tested_files.append(fn)
path, result = loader.OpenStackConfig()._load_yaml_json_file(
tested_files
)
# NOTE(hberaud): Prefer to test path rather than file because
# our FILES var is a dict so results are appended
# without keeping the initial order (python 3.5)
@@ -77,7 +83,8 @@ class TestLoader(base.TestCase):
tested_files.append(fn)
path, result = loader.OpenStackConfig()._load_yaml_json_file(
tested_files
)
# NOTE(hberaud): Prefer to test path rather than file because
# our FILES var is a dict so results are appended
# without keeping the initial order (python 3.5)